author      Christopher Speller <crspeller@gmail.com>  2017-09-29 12:46:30 -0700
committer   GitHub <noreply@github.com>                2017-09-29 12:46:30 -0700
commit      b84736e9b6401df0c6eeab9950bef09458a6aefd (patch)
tree        d9175208de3236db75a33879750a57b3000ba096 /vendor/github.com
parent      8b9dbb86133ff0fd6002a391268383d1593918ca (diff)
download    chat-b84736e9b6401df0c6eeab9950bef09458a6aefd.tar.gz
            chat-b84736e9b6401df0c6eeab9950bef09458a6aefd.tar.bz2
            chat-b84736e9b6401df0c6eeab9950bef09458a6aefd.zip
Updating server dependencies. (#7538)
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/NYTimes/gziphandler/LICENSE | 201
-rw-r--r--  vendor/github.com/NYTimes/gziphandler/LICENSE.md | 13
-rw-r--r--  vendor/github.com/NYTimes/gziphandler/gzip.go | 4
-rw-r--r--  vendor/github.com/NYTimes/gziphandler/gzip_test.go | 19
-rw-r--r--  vendor/github.com/armon/go-metrics/prometheus/prometheus.go | 6
-rw-r--r--  vendor/github.com/cpanato/html2text/LICENSE | 1
-rw-r--r--  vendor/github.com/disintegration/imaging/.travis.yml | 5
-rw-r--r--  vendor/github.com/disintegration/imaging/helpers.go | 411
-rw-r--r--  vendor/github.com/go-redis/redis/.travis.yml | 1
-rw-r--r--  vendor/github.com/go-redis/redis/README.md | 3
-rw-r--r--  vendor/github.com/go-redis/redis/cluster.go | 366
-rw-r--r--  vendor/github.com/go-redis/redis/cluster_commands.go | 22
-rw-r--r--  vendor/github.com/go-redis/redis/cluster_test.go | 79
-rw-r--r--  vendor/github.com/go-redis/redis/command.go | 86
-rw-r--r--  vendor/github.com/go-redis/redis/commands.go | 5
-rw-r--r--  vendor/github.com/go-redis/redis/commands_test.go | 55
-rw-r--r--  vendor/github.com/go-redis/redis/export_test.go | 14
-rw-r--r--  vendor/github.com/go-redis/redis/internal/error.go | 27
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/pool.go | 25
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/reader.go | 2
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/scan.go | 2
-rw-r--r--  vendor/github.com/go-redis/redis/main_test.go | 4
-rw-r--r--  vendor/github.com/go-redis/redis/options.go | 10
-rw-r--r--  vendor/github.com/go-redis/redis/pipeline.go | 10
-rw-r--r--  vendor/github.com/go-redis/redis/pool_test.go | 10
-rw-r--r--  vendor/github.com/go-redis/redis/pubsub.go | 5
-rw-r--r--  vendor/github.com/go-redis/redis/pubsub_test.go | 6
-rw-r--r--  vendor/github.com/go-redis/redis/redis.go | 92
-rw-r--r--  vendor/github.com/go-redis/redis/ring.go | 121
-rw-r--r--  vendor/github.com/go-redis/redis/sentinel.go | 6
-rw-r--r--  vendor/github.com/go-redis/redis/tx.go | 23
-rw-r--r--  vendor/github.com/go-redis/redis/universal.go | 7
-rw-r--r--  vendor/github.com/golang/protobuf/.travis.yml | 1
-rw-r--r--  vendor/github.com/golang/protobuf/README.md | 1
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/jsonpb.go | 66
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go | 107
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go | 425
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto | 28
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go | 63
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go | 29
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto | 1
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.pb.go | 10
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.proto | 10
-rw-r--r--  vendor/github.com/gorilla/mux/README.md | 50
-rw-r--r--  vendor/github.com/gorilla/mux/doc.go | 12
-rw-r--r--  vendor/github.com/gorilla/mux/mux.go | 48
-rw-r--r--  vendor/github.com/gorilla/mux/mux_test.go | 145
-rw-r--r--  vendor/github.com/gorilla/mux/old_test.go | 8
-rw-r--r--  vendor/github.com/gorilla/mux/regexp.go | 5
-rw-r--r--  vendor/github.com/gorilla/mux/route.go | 50
-rw-r--r--  vendor/github.com/hashicorp/hcl/decoder.go | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl/decoder_test.go | 9
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/parser/parser.go | 14
-rw-r--r--  vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl | 1
-rw-r--r--  vendor/github.com/hashicorp/hcl/test-fixtures/float.json | 3
-rw-r--r--  vendor/github.com/hashicorp/memberlist/memberlist.go | 4
-rw-r--r--  vendor/github.com/lib/pq/.travis.yml | 1
-rw-r--r--  vendor/github.com/lib/pq/conn.go | 34
-rw-r--r--  vendor/github.com/lib/pq/encode.go | 12
-rw-r--r--  vendor/github.com/lib/pq/encode_test.go | 9
-rw-r--r--  vendor/github.com/lib/pq/oid/gen.go | 59
-rw-r--r--  vendor/github.com/lib/pq/oid/types.go | 172
-rw-r--r--  vendor/github.com/lib/pq/rows.go | 93
-rw-r--r--  vendor/github.com/lib/pq/rows_test.go | 220
-rw-r--r--  vendor/github.com/magiconair/properties/.travis.yml | 1
-rw-r--r--  vendor/github.com/magiconair/properties/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/magiconair/properties/properties.go | 3
-rw-r--r--  vendor/github.com/magiconair/properties/properties_test.go | 13
-rw-r--r--  vendor/github.com/mattermost/html2text/LICENSE | 1
-rw-r--r--  vendor/github.com/miekg/dns/CONTRIBUTORS | 1
-rw-r--r--  vendor/github.com/miekg/dns/client.go | 332
-rw-r--r--  vendor/github.com/miekg/dns/client_test.go | 168
-rw-r--r--  vendor/github.com/miekg/dns/dnsutil/util.go | 2
-rw-r--r--  vendor/github.com/miekg/dns/doc.go | 30
-rw-r--r--  vendor/github.com/miekg/dns/edns.go | 15
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/cmsghdr.go | 7
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_32bit.go | 20
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_64bit.go | 20
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/cmsghdr_other.go | 13
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/controlmessage.go | 118
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/controlmessage_test.go | 103
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/socket.go | 4
-rw-r--r--  vendor/github.com/miekg/dns/internal/socket/sys.go | 14
-rw-r--r--  vendor/github.com/miekg/dns/msg_helpers.go | 7
-rw-r--r--  vendor/github.com/miekg/dns/parse_test.go | 190
-rw-r--r--  vendor/github.com/miekg/dns/privaterr_test.go | 2
-rw-r--r--  vendor/github.com/miekg/dns/scan.go | 129
-rw-r--r--  vendor/github.com/miekg/dns/scan_rr.go | 590
-rw-r--r--  vendor/github.com/miekg/dns/scan_test.go | 5
-rw-r--r--  vendor/github.com/miekg/dns/server_test.go | 10
-rw-r--r--  vendor/github.com/miekg/dns/udp.go | 15
-rw-r--r--  vendor/github.com/miekg/dns/udp_linux.go | 115
-rw-r--r--  vendor/github.com/miekg/dns/udp_linux_test.go | 68
-rw-r--r--  vendor/github.com/miekg/dns/udp_other.go | 10
-rw-r--r--  vendor/github.com/miekg/dns/udp_windows.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/.travis.yml | 1
-rw-r--r--  vendor/github.com/minio/minio-go/MAINTAINERS.md | 21
-rw-r--r--  vendor/github.com/minio/minio-go/README.md | 31
-rw-r--r--  vendor/github.com/minio/minio-go/api-presigned.go | 26
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go | 30
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-streaming.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/api.go | 43
-rw-r--r--  vendor/github.com/minio/minio-go/api_unit_test.go | 44
-rw-r--r--  vendor/github.com/minio/minio-go/appveyor.yml | 1
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache.go | 14
-rw-r--r--  vendor/github.com/minio/minio-go/docs/API.md | 47
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go | 53
-rw-r--r--  vendor/github.com/minio/minio-go/functional_tests.go | 2253
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3utils/utils.go | 5
-rw-r--r--  vendor/github.com/mssola/user_agent/.travis.yml | 11
-rw-r--r--  vendor/github.com/mssola/user_agent/LICENSE | 2
-rw-r--r--  vendor/github.com/mssola/user_agent/README.md | 4
-rw-r--r--  vendor/github.com/mssola/user_agent/all_test.go | 670
-rw-r--r--  vendor/github.com/mssola/user_agent/bot.go | 14
-rw-r--r--  vendor/github.com/mssola/user_agent/browser.go | 40
-rw-r--r--  vendor/github.com/mssola/user_agent/operating_systems.go | 107
-rw-r--r--  vendor/github.com/mssola/user_agent/user_agent.go | 15
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/.travis.yml | 2
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go | 4
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/plurals.xml | 18
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/language_test.go | 4
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go | 20
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/operands_test.go | 24
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go | 5
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go | 166
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen_test.go | 52
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_test.go | 27
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go | 2
-rw-r--r--  vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go | 2
-rw-r--r--  vendor/github.com/pelletier/go-toml/.travis.yml | 2
-rw-r--r--  vendor/github.com/pelletier/go-toml/README.md | 9
-rw-r--r--  vendor/github.com/pelletier/go-toml/query/parser.go | 2
-rw-r--r--  vendor/github.com/pkg/errors/.travis.yml | 1
-rw-r--r--  vendor/github.com/prometheus/common/promlog/flag/flag.go | 33
-rw-r--r--  vendor/github.com/prometheus/common/promlog/log.go | 63
-rw-r--r--  vendor/github.com/spf13/afero/match.go | 110
-rw-r--r--  vendor/github.com/spf13/afero/match_test.go | 183
-rw-r--r--  vendor/github.com/spf13/afero/memmap.go | 5
-rw-r--r--  vendor/github.com/spf13/afero/memmap_test.go | 41
-rw-r--r--  vendor/github.com/spf13/cobra/README.md | 442
-rw-r--r--  vendor/github.com/spf13/cobra/bash_completions.go | 36
-rw-r--r--  vendor/github.com/spf13/cobra/bash_completions_test.go | 2
-rw-r--r--  vendor/github.com/spf13/cobra/command.go | 5
-rw-r--r--  vendor/github.com/spf13/cobra/zsh_completions.go | 20
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/notepad.go | 2
-rw-r--r--  vendor/github.com/xenolf/lego/CHANGELOG.md | 11
-rw-r--r--  vendor/github.com/xenolf/lego/Dockerfile | 15
-rw-r--r--  vendor/github.com/xenolf/lego/README.md | 4
-rw-r--r--  vendor/github.com/xenolf/lego/cli.go | 4
-rw-r--r--  vendor/github.com/xenolf/lego/providers/dns/azure/azure.go | 7
-rw-r--r--  vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go | 2
-rw-r--r--  vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go | 2
153 files changed, 7033 insertions, 3096 deletions
diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE b/vendor/github.com/NYTimes/gziphandler/LICENSE
new file mode 100644
index 000000000..df6192d36
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-2017 The New York Times Company
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE.md b/vendor/github.com/NYTimes/gziphandler/LICENSE.md
deleted file mode 100644
index b7e2ecb63..000000000
--- a/vendor/github.com/NYTimes/gziphandler/LICENSE.md
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright (c) 2015 The New York Times Company
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this library except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go
index c47039fd3..b6af9115a 100644
--- a/vendor/github.com/NYTimes/gziphandler/gzip.go
+++ b/vendor/github.com/NYTimes/gziphandler/gzip.go
@@ -105,7 +105,7 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If the global writes are bigger than the minSize and we're about to write
// a response containing a content type we want to handle, enable
// compression.
- if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) {
+ if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" {
err := w.startGzip()
if err != nil {
return 0, err
@@ -134,7 +134,7 @@ func (w *GzipResponseWriter) startGzip() error {
// Initialize the GZIP response.
w.init()
- // Flush the buffer into the gzip reponse.
+ // Flush the buffer into the gzip response.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_test.go b/vendor/github.com/NYTimes/gziphandler/gzip_test.go
index d5a7eb82b..655a19373 100644
--- a/vendor/github.com/NYTimes/gziphandler/gzip_test.go
+++ b/vendor/github.com/NYTimes/gziphandler/gzip_test.go
@@ -81,6 +81,17 @@ func TestGzipHandler(t *testing.T) {
assert.Equal(t, http.DetectContentType([]byte(testBody)), res3.Header().Get("Content-Type"))
}
+func TestGzipHandlerAlreadyCompressed(t *testing.T) {
+ handler := newTestHandler(testBody)
+
+ req, _ := http.NewRequest("GET", "/gzipped", nil)
+ req.Header.Set("Accept-Encoding", "gzip")
+ res := httptest.NewRecorder()
+ handler.ServeHTTP(res, req)
+
+ assert.Equal(t, testBody, res.Body.String())
+}
+
func TestNewGzipLevelHandler(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
@@ -435,6 +446,12 @@ func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
func newTestHandler(body string) http.Handler {
return GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, body)
+ switch r.URL.Path {
+ case "/gzipped":
+ w.Header().Set("Content-Encoding", "gzip")
+ io.WriteString(w, body)
+ default:
+ io.WriteString(w, body)
+ }
}))
}
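
The gzip.go change above means GzipHandler now leaves a response alone whenever the wrapped handler has already set a Content-Encoding header, as the new test demonstrates. A minimal usage sketch of that behaviour (illustrative only, not taken from this diff; it assumes just the public gziphandler.GzipHandler API):

package main

import (
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	// Plain responses are gzipped for clients that send Accept-Encoding: gzip.
	plain := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello world")
	})

	// Responses that already declare Content-Encoding are passed through unchanged.
	preCompressed := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Encoding", "gzip")
		io.WriteString(w, "already gzipped bytes")
	})

	http.Handle("/plain", gziphandler.GzipHandler(plain))
	http.Handle("/pre", gziphandler.GzipHandler(preCompressed))
	_ = http.ListenAndServe(":8080", nil)
}
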
diff --git a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go
index f31f2f9a5..a647e5965 100644
--- a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go
+++ b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go
@@ -66,7 +66,7 @@ func (p *PrometheusSink) SetGaugeWithLabels(parts []string, val float32, labels
ConstLabels: prometheusLabels(labels),
})
prometheus.MustRegister(g)
- p.gauges[key] = g
+ p.gauges[hash] = g
}
g.Set(float64(val))
}
@@ -88,7 +88,7 @@ func (p *PrometheusSink) AddSampleWithLabels(parts []string, val float32, labels
ConstLabels: prometheusLabels(labels),
})
prometheus.MustRegister(g)
- p.summaries[key] = g
+ p.summaries[hash] = g
}
g.Observe(float64(val))
}
@@ -115,7 +115,7 @@ func (p *PrometheusSink) IncrCounterWithLabels(parts []string, val float32, labe
ConstLabels: prometheusLabels(labels),
})
prometheus.MustRegister(g)
- p.counters[key] = g
+ p.counters[hash] = g
}
g.Add(float64(val))
}
diff --git a/vendor/github.com/cpanato/html2text/LICENSE b/vendor/github.com/cpanato/html2text/LICENSE
index 24dc4abec..1f2423ecb 100644
--- a/vendor/github.com/cpanato/html2text/LICENSE
+++ b/vendor/github.com/cpanato/html2text/LICENSE
@@ -1,6 +1,7 @@
The MIT License (MIT)
Copyright (c) 2015 Jay Taylor
+Modified work: Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/disintegration/imaging/.travis.yml b/vendor/github.com/disintegration/imaging/.travis.yml
index 110437d6a..4886abd74 100644
--- a/vendor/github.com/disintegration/imaging/.travis.yml
+++ b/vendor/github.com/disintegration/imaging/.travis.yml
@@ -4,12 +4,9 @@ sudo: false
go:
- 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- 1.7
- 1.8
+ - 1.9
before_install:
- go get golang.org/x/tools/cmd/cover
diff --git a/vendor/github.com/disintegration/imaging/helpers.go b/vendor/github.com/disintegration/imaging/helpers.go
index 3a4cbde80..9184ccde6 100644
--- a/vendor/github.com/disintegration/imaging/helpers.go
+++ b/vendor/github.com/disintegration/imaging/helpers.go
@@ -168,213 +168,266 @@ func New(width, height int, fillColor color.Color) *image.NRGBA {
// Clone returns a copy of the given image.
func Clone(img image.Image) *image.NRGBA {
- srcBounds := img.Bounds()
- srcMinX := srcBounds.Min.X
- srcMinY := srcBounds.Min.Y
-
- dstBounds := srcBounds.Sub(srcBounds.Min)
- dstW := dstBounds.Dx()
- dstH := dstBounds.Dy()
+ dstBounds := img.Bounds().Sub(img.Bounds().Min)
dst := image.NewNRGBA(dstBounds)
switch src := img.(type) {
-
case *image.NRGBA:
- rowSize := srcBounds.Dx() * 4
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- copy(dst.Pix[di:di+rowSize], src.Pix[si:si+rowSize])
+ copyNRGBA(dst, src)
+ case *image.NRGBA64:
+ copyNRGBA64(dst, src)
+ case *image.RGBA:
+ copyRGBA(dst, src)
+ case *image.RGBA64:
+ copyRGBA64(dst, src)
+ case *image.Gray:
+ copyGray(dst, src)
+ case *image.Gray16:
+ copyGray16(dst, src)
+ case *image.YCbCr:
+ copyYCbCr(dst, src)
+ case *image.Paletted:
+ copyPaletted(dst, src)
+ default:
+ copyImage(dst, src)
+ }
+
+ return dst
+}
+
+func copyNRGBA(dst *image.NRGBA, src *image.NRGBA) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ rowSize := dstW * 4
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ copy(dst.Pix[di:di+rowSize], src.Pix[si:si+rowSize])
+ }
+ })
+}
+
+func copyNRGBA64(dst *image.NRGBA, src *image.NRGBA64) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ dst.Pix[di+0] = src.Pix[si+0]
+ dst.Pix[di+1] = src.Pix[si+2]
+ dst.Pix[di+2] = src.Pix[si+4]
+ dst.Pix[di+3] = src.Pix[si+6]
+ di += 4
+ si += 8
}
- })
+ }
+ })
+}
- case *image.NRGBA64:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- for dstX := 0; dstX < dstW; dstX++ {
+func copyRGBA(dst *image.NRGBA, src *image.RGBA) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ a := src.Pix[si+3]
+ dst.Pix[di+3] = a
+
+ switch a {
+ case 0:
+ dst.Pix[di+0] = 0
+ dst.Pix[di+1] = 0
+ dst.Pix[di+2] = 0
+ case 0xff:
dst.Pix[di+0] = src.Pix[si+0]
- dst.Pix[di+1] = src.Pix[si+2]
- dst.Pix[di+2] = src.Pix[si+4]
- dst.Pix[di+3] = src.Pix[si+6]
- di += 4
- si += 8
+ dst.Pix[di+1] = src.Pix[si+1]
+ dst.Pix[di+2] = src.Pix[si+2]
+ default:
+ var tmp uint16
+ tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a)
+ dst.Pix[di+0] = uint8(tmp)
+ tmp = uint16(src.Pix[si+1]) * 0xff / uint16(a)
+ dst.Pix[di+1] = uint8(tmp)
+ tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a)
+ dst.Pix[di+2] = uint8(tmp)
}
- }
- })
- case *image.RGBA:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- a := src.Pix[si+3]
- dst.Pix[di+3] = a
-
- switch a {
- case 0:
- dst.Pix[di+0] = 0
- dst.Pix[di+1] = 0
- dst.Pix[di+2] = 0
- case 0xff:
- dst.Pix[di+0] = src.Pix[si+0]
- dst.Pix[di+1] = src.Pix[si+1]
- dst.Pix[di+2] = src.Pix[si+2]
- default:
- var tmp uint16
- tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a)
- dst.Pix[di+0] = uint8(tmp)
- tmp = uint16(src.Pix[si+1]) * 0xff / uint16(a)
- dst.Pix[di+1] = uint8(tmp)
- tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a)
- dst.Pix[di+2] = uint8(tmp)
- }
-
- di += 4
- si += 4
- }
+ di += 4
+ si += 4
}
- })
+ }
+ })
+}
- case *image.RGBA64:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- a := src.Pix[si+6]
- dst.Pix[di+3] = a
-
- switch a {
- case 0:
- dst.Pix[di+0] = 0
- dst.Pix[di+1] = 0
- dst.Pix[di+2] = 0
- case 0xff:
- dst.Pix[di+0] = src.Pix[si+0]
- dst.Pix[di+1] = src.Pix[si+2]
- dst.Pix[di+2] = src.Pix[si+4]
- default:
- var tmp uint16
- tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a)
- dst.Pix[di+0] = uint8(tmp)
- tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a)
- dst.Pix[di+1] = uint8(tmp)
- tmp = uint16(src.Pix[si+4]) * 0xff / uint16(a)
- dst.Pix[di+2] = uint8(tmp)
- }
-
- di += 4
- si += 8
+func copyRGBA64(dst *image.NRGBA, src *image.RGBA64) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ a := src.Pix[si+6]
+ dst.Pix[di+3] = a
+
+ switch a {
+ case 0:
+ dst.Pix[di+0] = 0
+ dst.Pix[di+1] = 0
+ dst.Pix[di+2] = 0
+ case 0xff:
+ dst.Pix[di+0] = src.Pix[si+0]
+ dst.Pix[di+1] = src.Pix[si+2]
+ dst.Pix[di+2] = src.Pix[si+4]
+ default:
+ var tmp uint16
+ tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a)
+ dst.Pix[di+0] = uint8(tmp)
+ tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a)
+ dst.Pix[di+1] = uint8(tmp)
+ tmp = uint16(src.Pix[si+4]) * 0xff / uint16(a)
+ dst.Pix[di+2] = uint8(tmp)
}
- }
- })
- case *image.Gray:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- c := src.Pix[si]
- dst.Pix[di+0] = c
- dst.Pix[di+1] = c
- dst.Pix[di+2] = c
- dst.Pix[di+3] = 0xff
- di += 4
- si += 1
- }
+ di += 4
+ si += 8
}
- })
+ }
+ })
+}
- case *image.Gray16:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- c := src.Pix[si]
- dst.Pix[di+0] = c
- dst.Pix[di+1] = c
- dst.Pix[di+2] = c
- dst.Pix[di+3] = 0xff
- di += 4
- si += 2
- }
+func copyGray(dst *image.NRGBA, src *image.Gray) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ c := src.Pix[si]
+ dst.Pix[di+0] = c
+ dst.Pix[di+1] = c
+ dst.Pix[di+2] = c
+ dst.Pix[di+3] = 0xff
+ di += 4
+ si++
}
- })
+ }
+ })
+}
- case *image.YCbCr:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- srcX := srcMinX + dstX
- srcY := srcMinY + dstY
- siy := src.YOffset(srcX, srcY)
- sic := src.COffset(srcX, srcY)
- r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic])
- dst.Pix[di+0] = r
- dst.Pix[di+1] = g
- dst.Pix[di+2] = b
- dst.Pix[di+3] = 0xff
- di += 4
- }
+func copyGray16(dst *image.NRGBA, src *image.Gray16) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ c := src.Pix[si]
+ dst.Pix[di+0] = c
+ dst.Pix[di+1] = c
+ dst.Pix[di+2] = c
+ dst.Pix[di+3] = 0xff
+ di += 4
+ si += 2
}
- })
-
- case *image.Paletted:
- plen := len(src.Palette)
- pnew := make([]color.NRGBA, plen)
- for i := 0; i < plen; i++ {
- pnew[i] = color.NRGBAModel.Convert(src.Palette[i]).(color.NRGBA)
}
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- si := src.PixOffset(srcMinX, srcMinY+dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- c := pnew[src.Pix[si]]
- dst.Pix[di+0] = c.R
- dst.Pix[di+1] = c.G
- dst.Pix[di+2] = c.B
- dst.Pix[di+3] = c.A
- di += 4
- si += 1
- }
- }
- })
+ })
+}
- default:
- parallel(dstH, func(partStart, partEnd int) {
- for dstY := partStart; dstY < partEnd; dstY++ {
- di := dst.PixOffset(0, dstY)
- for dstX := 0; dstX < dstW; dstX++ {
- c := color.NRGBAModel.Convert(img.At(srcMinX+dstX, srcMinY+dstY)).(color.NRGBA)
- dst.Pix[di+0] = c.R
- dst.Pix[di+1] = c.G
- dst.Pix[di+2] = c.B
- dst.Pix[di+3] = c.A
- di += 4
- }
+func copyYCbCr(dst *image.NRGBA, src *image.YCbCr) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ srcX := srcMinX + dstX
+ srcY := srcMinY + dstY
+ siy := src.YOffset(srcX, srcY)
+ sic := src.COffset(srcX, srcY)
+ r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic])
+ dst.Pix[di+0] = r
+ dst.Pix[di+1] = g
+ dst.Pix[di+2] = b
+ dst.Pix[di+3] = 0xff
+ di += 4
}
- })
+ }
+ })
+}
+func copyPaletted(dst *image.NRGBA, src *image.Paletted) {
+ srcMinX := src.Rect.Min.X
+ srcMinY := src.Rect.Min.Y
+ dstW := dst.Rect.Dx()
+ dstH := dst.Rect.Dy()
+ plen := len(src.Palette)
+ pnew := make([]color.NRGBA, plen)
+ for i := 0; i < plen; i++ {
+ pnew[i] = color.NRGBAModel.Convert(src.Palette[i]).(color.NRGBA)
}
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ si := src.PixOffset(srcMinX, srcMinY+dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ c := pnew[src.Pix[si]]
+ dst.Pix[di+0] = c.R
+ dst.Pix[di+1] = c.G
+ dst.Pix[di+2] = c.B
+ dst.Pix[di+3] = c.A
+ di += 4
+ si++
+ }
+ }
+ })
+}
- return dst
+func copyImage(dst *image.NRGBA, src image.Image) {
+ srcMinX := src.Bounds().Min.X
+ srcMinY := src.Bounds().Min.Y
+ dstW := dst.Bounds().Dx()
+ dstH := dst.Bounds().Dy()
+ parallel(dstH, func(partStart, partEnd int) {
+ for dstY := partStart; dstY < partEnd; dstY++ {
+ di := dst.PixOffset(0, dstY)
+ for dstX := 0; dstX < dstW; dstX++ {
+ c := color.NRGBAModel.Convert(src.At(srcMinX+dstX, srcMinY+dstY)).(color.NRGBA)
+ dst.Pix[di+0] = c.R
+ dst.Pix[di+1] = c.G
+ dst.Pix[di+2] = c.B
+ dst.Pix[di+3] = c.A
+ di += 4
+ }
+ }
+ })
}
// toNRGBA converts any image type to *image.NRGBA with min-point at (0, 0).
func toNRGBA(img image.Image) *image.NRGBA {
- srcBounds := img.Bounds()
- if srcBounds.Min.X == 0 && srcBounds.Min.Y == 0 {
- if src0, ok := img.(*image.NRGBA); ok {
- return src0
- }
+ if img, ok := img.(*image.NRGBA); ok && img.Bounds().Min.Eq(image.ZP) {
+ return img
}
return Clone(img)
}
diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml
index f4666c593..f49927ee8 100644
--- a/vendor/github.com/go-redis/redis/.travis.yml
+++ b/vendor/github.com/go-redis/redis/.travis.yml
@@ -8,6 +8,7 @@ go:
- 1.4.x
- 1.7.x
- 1.8.x
+ - 1.9.x
- tip
matrix:
diff --git a/vendor/github.com/go-redis/redis/README.md b/vendor/github.com/go-redis/redis/README.md
index fd036496d..0a2a67124 100644
--- a/vendor/github.com/go-redis/redis/README.md
+++ b/vendor/github.com/go-redis/redis/README.md
@@ -6,6 +6,7 @@
Supports:
- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
+- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
- [Transactions](https://godoc.org/github.com/go-redis/redis#Multi).
- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
@@ -16,7 +17,7 @@ Supports:
- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
- [Cache friendly](https://github.com/go-redis/cache).
-- [Rate limiting](https://github.com/go-redis/rate).
+- [Rate limiting](https://github.com/go-redis/redis_rate).
- [Distributed Locks](https://github.com/bsm/redis-lock).
API docs: https://godoc.org/github.com/go-redis/redis.
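
For context on the Pipeline/TxPipeline features the README lists, here is a minimal go-redis usage sketch (illustrative only, based on the package's documented v6 API rather than on this diff; it assumes a Redis server reachable at localhost:6379):

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Queue several commands and send them to the server in one round trip.
	var incr *redis.IntCmd
	_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
		incr = pipe.Incr("pipelined_counter")
		pipe.Expire("pipelined_counter", time.Hour)
		return nil
	})
	fmt.Println(incr.Val(), err)
}
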
diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go
index 647a25be3..c81fc1d57 100644
--- a/vendor/github.com/go-redis/redis/cluster.go
+++ b/vendor/github.com/go-redis/redis/cluster.go
@@ -14,8 +14,8 @@ import (
"github.com/go-redis/redis/internal/proto"
)
-var errClusterNoNodes = internal.RedisError("redis: cluster has no nodes")
-var errNilClusterState = internal.RedisError("redis: cannot load cluster slots")
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+var errNilClusterState = fmt.Errorf("redis: cannot load cluster slots")
// ClusterOptions are used to configure a cluster client and should be
// passed to NewClusterClient.
@@ -64,6 +64,19 @@ func (opt *ClusterOptions) init() {
opt.ReadOnly = true
}
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
@@ -192,6 +205,21 @@ func (c *clusterNodes) Close() error {
return firstErr
}
+func (c *clusterNodes) Addrs() ([]string, error) {
+ c.mu.RLock()
+ closed := c.closed
+ addrs := c.addrs
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
func (c *clusterNodes) NextGeneration() uint32 {
c.generation++
return c.generation
@@ -272,16 +300,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
}
func (c *clusterNodes) Random() (*clusterNode, error) {
- c.mu.RLock()
- closed := c.closed
- addrs := c.addrs
- c.mu.RUnlock()
-
- if closed {
- return nil, pool.ErrClosed
- }
- if len(addrs) == 0 {
- return nil, errClusterNoNodes
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
}
var nodeErr error
@@ -468,13 +489,23 @@ func (c *ClusterClient) Options() *ClusterOptions {
return c.opt
}
-func (c *ClusterClient) state() *clusterState {
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) state() (*clusterState, error) {
v := c._state.Load()
if v != nil {
- return v.(*clusterState)
+ return v.(*clusterState), nil
}
+
+ _, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
c.lazyReloadState()
- return nil
+ return nil, errNilClusterState
}
func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
@@ -495,17 +526,22 @@ func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
if err != nil {
return nil
}
- return c.cmdsInfo[name]
+ info := c.cmdsInfo[name]
+ if info == nil {
+ internal.Logf("info for cmd=%s not found", name)
+ }
+ return info
}
-func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) {
- if state == nil {
- node, err := c.nodes.Random()
- return 0, node, err
- }
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ return hashtag.Slot(firstKey)
+}
+func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) {
cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+ firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
slot := hashtag.Slot(firstKey)
if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly {
@@ -523,19 +559,51 @@ func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *cl
}
func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
- state := c.state()
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: keys don't hash to the same slot")
+ }
- var node *clusterNode
- var err error
- if state != nil && len(keys) > 0 {
- node, err = state.slotMasterNode(hashtag.Slot(keys[0]))
- } else {
- node, err = c.nodes.Random()
+ state, err := c.state()
+ if err != nil {
+ return err
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ return fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ }
}
+
+ node, err := state.slotMasterNode(slot)
if err != nil {
return err
}
- return node.Client.Watch(fn, keys...)
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
+ err = node.Client.Watch(fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := internal.IsMovedError(err)
+ if moved || ask {
+ c.lazyReloadState()
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ return err
+ }
+
+ return err
}
// Close closes the cluster client, releasing any open resources.
@@ -547,7 +615,13 @@ func (c *ClusterClient) Close() error {
}
func (c *ClusterClient) Process(cmd Cmder) error {
- slot, node, err := c.cmdSlotAndNode(c.state(), cmd)
+ state, err := c.state()
+ if err != nil {
+ cmd.setErr(err)
+ return err
+ }
+
+ _, node, err := c.cmdSlotAndNode(state, cmd)
if err != nil {
cmd.setErr(err)
return err
@@ -556,7 +630,7 @@ func (c *ClusterClient) Process(cmd Cmder) error {
var ask bool
for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
if attempt > 0 {
- time.Sleep(node.Client.retryBackoff(attempt))
+ time.Sleep(c.retryBackoff(attempt))
}
if ask {
@@ -572,7 +646,7 @@ func (c *ClusterClient) Process(cmd Cmder) error {
// If there is no error - we are done.
if err == nil {
- return nil
+ break
}
// If slave is loading - read from master.
@@ -582,12 +656,11 @@ func (c *ClusterClient) Process(cmd Cmder) error {
continue
}
- // On network errors try random node.
- if internal.IsRetryableError(err) || internal.IsClusterDownError(err) {
- node, err = c.nodes.Random()
- if err != nil {
- cmd.setErr(err)
- return err
+ if internal.IsRetryableError(err, true) {
+ var nodeErr error
+ node, nodeErr = c.nodes.Random()
+ if nodeErr != nil {
+ break
}
continue
}
@@ -596,20 +669,13 @@ func (c *ClusterClient) Process(cmd Cmder) error {
var addr string
moved, ask, addr = internal.IsMovedError(err)
if moved || ask {
- state := c.state()
- if state != nil && slot >= 0 {
- master, _ := state.slotMasterNode(slot)
- if moved && (master == nil || master.Client.getAddr() != addr) {
- c.lazyReloadState()
- }
- }
+ c.lazyReloadState()
- node, err = c.nodes.GetOrCreate(addr)
- if err != nil {
- cmd.setErr(err)
- return err
+ var nodeErr error
+ node, nodeErr = c.nodes.GetOrCreate(addr)
+ if nodeErr != nil {
+ break
}
-
continue
}
@@ -622,9 +688,9 @@ func (c *ClusterClient) Process(cmd Cmder) error {
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
- state := c.state()
- if state == nil {
- return errNilClusterState
+ state, err := c.state()
+ if err != nil {
+ return err
}
var wg sync.WaitGroup
@@ -655,9 +721,9 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
// ForEachSlave concurrently calls the fn on each slave node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
- state := c.state()
- if state == nil {
- return errNilClusterState
+ state, err := c.state()
+ if err != nil {
+ return err
}
var wg sync.WaitGroup
@@ -688,9 +754,9 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
// ForEachNode concurrently calls the fn on each known node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
- state := c.state()
- if state == nil {
- return errNilClusterState
+ state, err := c.state()
+ if err != nil {
+ return err
}
var wg sync.WaitGroup
@@ -728,27 +794,31 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
func (c *ClusterClient) PoolStats() *PoolStats {
var acc PoolStats
- state := c.state()
+ state, _ := c.state()
if state == nil {
return &acc
}
for _, node := range state.masters {
s := node.Client.connPool.Stats()
- acc.Requests += s.Requests
acc.Hits += s.Hits
+ acc.Misses += s.Misses
acc.Timeouts += s.Timeouts
+
acc.TotalConns += s.TotalConns
acc.FreeConns += s.FreeConns
+ acc.StaleConns += s.StaleConns
}
for _, node := range state.slaves {
s := node.Client.connPool.Stats()
- acc.Requests += s.Requests
acc.Hits += s.Hits
+ acc.Misses += s.Misses
acc.Timeouts += s.Timeouts
+
acc.TotalConns += s.TotalConns
acc.FreeConns += s.FreeConns
+ acc.StaleConns += s.StaleConns
}
return &acc
@@ -762,10 +832,8 @@ func (c *ClusterClient) lazyReloadState() {
go func() {
defer atomic.StoreUint32(&c.reloading, 0)
- var state *clusterState
for {
- var err error
- state, err = c.reloadState()
+ state, err := c.reloadState()
if err == pool.ErrClosed {
return
}
@@ -776,11 +844,10 @@ func (c *ClusterClient) lazyReloadState() {
}
c._state.Store(state)
+ time.Sleep(5 * time.Second)
+ c.nodes.GC(state.generation)
break
}
-
- time.Sleep(3 * time.Second)
- c.nodes.GC(state.generation)
}()
}
@@ -810,21 +877,12 @@ func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
break
}
- var n int
for _, node := range nodes {
- nn, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
if err != nil {
internal.Logf("ReapStaleConns failed: %s", err)
- } else {
- n += nn
}
}
-
- s := c.PoolStats()
- internal.Logf(
- "reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
- n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
- )
}
}
@@ -837,16 +895,21 @@ func (c *ClusterClient) Pipeline() Pipeliner {
}
func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().pipelined(fn)
+ return c.Pipeline().Pipelined(fn)
}
func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
cmdsMap, err := c.mapCmdsByNode(cmds)
if err != nil {
+ setCmdsErr(cmds, err)
return err
}
- for i := 0; i <= c.opt.MaxRedirects; i++ {
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
failedCmds := make(map[*clusterNode][]Cmder)
for node, cmds := range cmdsMap {
@@ -856,8 +919,12 @@ func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
continue
}
- err = c.pipelineProcessCmds(cn, cmds, failedCmds)
- node.Client.releaseConn(cn, err)
+ err = c.pipelineProcessCmds(node, cn, cmds, failedCmds)
+ if err == nil || internal.IsRedisError(err) {
+ _ = node.Client.connPool.Put(cn)
+ } else {
+ _ = node.Client.connPool.Remove(cn)
+ }
}
if len(failedCmds) == 0 {
@@ -866,21 +933,20 @@ func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
cmdsMap = failedCmds
}
- var firstErr error
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- firstErr = err
- break
- }
- }
- return firstErr
+ return firstCmdsErr(cmds)
}
func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) {
- state := c.state()
+ state, err := c.state()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return nil, err
+ }
+
cmdsMap := make(map[*clusterNode][]Cmder)
for _, cmd := range cmds {
- _, node, err := c.cmdSlotAndNode(state, cmd)
+ slot := c.cmdSlot(cmd)
+ node, err := state.slotMasterNode(slot)
if err != nil {
return nil, err
}
@@ -890,11 +956,12 @@ func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, e
}
func (c *ClusterClient) pipelineProcessCmds(
- cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+ node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
cn.SetWriteTimeout(c.opt.WriteTimeout)
if err := writeCmd(cn, cmds...); err != nil {
setCmdsErr(cmds, err)
+ failedCmds[node] = cmds
return err
}
@@ -907,46 +974,53 @@ func (c *ClusterClient) pipelineProcessCmds(
func (c *ClusterClient) pipelineReadCmds(
cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
- var firstErr error
for _, cmd := range cmds {
err := cmd.readReply(cn)
if err == nil {
continue
}
- if firstErr == nil {
- firstErr = err
+ if c.checkMovedErr(cmd, err, failedCmds) {
+ continue
}
- err = c.checkMovedErr(cmd, failedCmds)
- if err != nil && firstErr == nil {
- firstErr = err
+ if internal.IsRedisError(err) {
+ continue
}
+
+ return err
}
- return firstErr
+ return nil
}
-func (c *ClusterClient) checkMovedErr(cmd Cmder, failedCmds map[*clusterNode][]Cmder) error {
- moved, ask, addr := internal.IsMovedError(cmd.Err())
+func (c *ClusterClient) checkMovedErr(
+ cmd Cmder, err error, failedCmds map[*clusterNode][]Cmder,
+) bool {
+ moved, ask, addr := internal.IsMovedError(err)
+
if moved {
c.lazyReloadState()
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
- return err
+ return false
}
failedCmds[node] = append(failedCmds[node], cmd)
+ return true
}
+
if ask {
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
- return err
+ return false
}
failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd)
+ return true
}
- return nil
+
+ return false
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
@@ -959,29 +1033,29 @@ func (c *ClusterClient) TxPipeline() Pipeliner {
}
func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().pipelined(fn)
+ return c.TxPipeline().Pipelined(fn)
}
func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
- cmdsMap, err := c.mapCmdsBySlot(cmds)
+ state, err := c.state()
if err != nil {
return err
}
- state := c.state()
- if state == nil {
- return errNilClusterState
- }
-
+ cmdsMap := c.mapCmdsBySlot(cmds)
for slot, cmds := range cmdsMap {
node, err := state.slotMasterNode(slot)
if err != nil {
setCmdsErr(cmds, err)
continue
}
-
cmdsMap := map[*clusterNode][]Cmder{node: cmds}
- for i := 0; i <= c.opt.MaxRedirects; i++ {
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
failedCmds := make(map[*clusterNode][]Cmder)
for node, cmds := range cmdsMap {
@@ -992,7 +1066,11 @@ func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
}
err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
- node.Client.releaseConn(cn, err)
+ if err == nil || internal.IsRedisError(err) {
+ _ = node.Client.connPool.Put(cn)
+ } else {
+ _ = node.Client.connPool.Remove(cn)
+ }
}
if len(failedCmds) == 0 {
@@ -1002,27 +1080,16 @@ func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
}
}
- var firstErr error
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- firstErr = err
- break
- }
- }
- return firstErr
+ return firstCmdsErr(cmds)
}
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) (map[int][]Cmder, error) {
- state := c.state()
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
cmdsMap := make(map[int][]Cmder)
for _, cmd := range cmds {
- slot, _, err := c.cmdSlotAndNode(state, cmd)
- if err != nil {
- return nil, err
- }
+ slot := c.cmdSlot(cmd)
cmdsMap[slot] = append(cmdsMap[slot], cmd)
}
- return cmdsMap, nil
+ return cmdsMap
}
func (c *ClusterClient) txPipelineProcessCmds(
@@ -1039,22 +1106,20 @@ func (c *ClusterClient) txPipelineProcessCmds(
cn.SetReadTimeout(c.opt.ReadTimeout)
if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil {
+ setCmdsErr(cmds, err)
return err
}
- _, err := pipelineReadCmds(cn, cmds)
- return err
+ return pipelineReadCmds(cn, cmds)
}
func (c *ClusterClient) txPipelineReadQueued(
cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
- var firstErr error
-
// Parse queued replies.
var statusCmd StatusCmd
- if err := statusCmd.readReply(cn); err != nil && firstErr == nil {
- firstErr = err
+ if err := statusCmd.readReply(cn); err != nil {
+ return err
}
for _, cmd := range cmds {
@@ -1063,15 +1128,11 @@ func (c *ClusterClient) txPipelineReadQueued(
continue
}
- cmd.setErr(err)
- if firstErr == nil {
- firstErr = err
+ if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) {
+ continue
}
- err = c.checkMovedErr(cmd, failedCmds)
- if err != nil && firstErr == nil {
- firstErr = err
- }
+ return err
}
// Parse number of replies.
@@ -1085,7 +1146,13 @@ func (c *ClusterClient) txPipelineReadQueued(
switch line[0] {
case proto.ErrorReply:
- return proto.ParseErrorReply(line)
+ err := proto.ParseErrorReply(line)
+ for _, cmd := range cmds {
+ if !c.checkMovedErr(cmd, err, failedCmds) {
+ break
+ }
+ }
+ return err
case proto.ArrayReply:
// ok
default:
@@ -1093,7 +1160,7 @@ func (c *ClusterClient) txPipelineReadQueued(
return err
}
- return firstErr
+ return nil
}
func (c *ClusterClient) pubSub(channels []string) *PubSub {
@@ -1112,7 +1179,12 @@ func (c *ClusterClient) pubSub(channels []string) *PubSub {
slot = -1
}
- masterNode, err := c.state().slotMasterNode(slot)
+ state, err := c.state()
+ if err != nil {
+ return nil, err
+ }
+
+ masterNode, err := state.slotMasterNode(slot)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/go-redis/redis/cluster_commands.go b/vendor/github.com/go-redis/redis/cluster_commands.go
new file mode 100644
index 000000000..dff62c902
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/cluster_commands.go
@@ -0,0 +1,22 @@
+package redis
+
+import "sync/atomic"
+
+func (c *ClusterClient) DBSize() *IntCmd {
+ cmd := NewIntCmd("dbsize")
+ var size int64
+ err := c.ForEachMaster(func(master *Client) error {
+ n, err := master.DBSize().Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.setErr(err)
+ return cmd
+ }
+ cmd.val = size
+ return cmd
+}
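
The new cluster_commands.go above gives ClusterClient a DBSize method that sums DBSIZE over every master via ForEachMaster. A hedged usage sketch (the node addresses are placeholders, not taken from this diff):

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	cluster := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"}, // placeholder cluster nodes
	})

	// DBSize aggregates the key count of all master nodes.
	n, err := cluster.DBSize().Result()
	fmt.Println(n, err)
}
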
diff --git a/vendor/github.com/go-redis/redis/cluster_test.go b/vendor/github.com/go-redis/redis/cluster_test.go
index 324bd1ce1..6f3677b93 100644
--- a/vendor/github.com/go-redis/redis/cluster_test.go
+++ b/vendor/github.com/go-redis/redis/cluster_test.go
@@ -200,7 +200,7 @@ var _ = Describe("ClusterClient", func() {
Eventually(func() string {
return client.Get("A").Val()
- }).Should(Equal("VALUE"))
+ }, 30*time.Second).Should(Equal("VALUE"))
cnt, err := client.Del("A").Result()
Expect(err).NotTo(HaveOccurred())
@@ -215,7 +215,7 @@ var _ = Describe("ClusterClient", func() {
Eventually(func() string {
return client.Get("A").Val()
- }).Should(Equal("VALUE"))
+ }, 30*time.Second).Should(Equal("VALUE"))
})
It("distributes keys", func() {
@@ -227,7 +227,7 @@ var _ = Describe("ClusterClient", func() {
for _, master := range cluster.masters() {
Eventually(func() string {
return master.Info("keyspace").Val()
- }, 5*time.Second).Should(Or(
+ }, 30*time.Second).Should(Or(
ContainSubstring("keys=31"),
ContainSubstring("keys=29"),
ContainSubstring("keys=40"),
@@ -251,7 +251,7 @@ var _ = Describe("ClusterClient", func() {
for _, master := range cluster.masters() {
Eventually(func() string {
return master.Info("keyspace").Val()
- }, 5*time.Second).Should(Or(
+ }, 30*time.Second).Should(Or(
ContainSubstring("keys=31"),
ContainSubstring("keys=29"),
ContainSubstring("keys=40"),
@@ -320,10 +320,6 @@ var _ = Describe("ClusterClient", func() {
Expect(err).NotTo(HaveOccurred())
Expect(cmds).To(HaveLen(14))
- if opt.RouteByLatency {
- return
- }
-
for _, key := range keys {
slot := hashtag.Slot(key)
client.SwapSlotNodes(slot)
@@ -432,6 +428,9 @@ var _ = Describe("ClusterClient", func() {
})
AfterEach(func() {
+ _ = client.ForEachMaster(func(master *redis.Client) error {
+ return master.FlushDB().Err()
+ })
Expect(client.Close()).NotTo(HaveOccurred())
})
@@ -476,11 +475,9 @@ var _ = Describe("ClusterClient", func() {
})
Expect(err).NotTo(HaveOccurred())
- for _, client := range cluster.masters() {
- size, err := client.DBSize().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(size).To(Equal(int64(0)))
- }
+ size, err := client.DBSize().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(0)))
})
It("should CLUSTER SLOTS", func() {
@@ -560,6 +557,9 @@ var _ = Describe("ClusterClient", func() {
})
AfterEach(func() {
+ _ = client.ForEachMaster(func(master *redis.Client) error {
+ return master.FlushDB().Err()
+ })
Expect(client.Close()).NotTo(HaveOccurred())
})
@@ -575,10 +575,19 @@ var _ = Describe("ClusterClient", func() {
_ = client.ForEachMaster(func(master *redis.Client) error {
return master.FlushDB().Err()
})
+
+ _ = client.ForEachSlave(func(slave *redis.Client) error {
+ Eventually(func() int64 {
+ return client.DBSize().Val()
+ }, 30*time.Second).Should(Equal(int64(0)))
+ return nil
+ })
})
AfterEach(func() {
- client.FlushDB()
+ _ = client.ForEachMaster(func(master *redis.Client) error {
+ return master.FlushDB().Err()
+ })
Expect(client.Close()).NotTo(HaveOccurred())
})
@@ -597,7 +606,7 @@ var _ = Describe("ClusterClient without nodes", func() {
Expect(client.Close()).NotTo(HaveOccurred())
})
- It("returns an error", func() {
+ It("Ping returns an error", func() {
err := client.Ping().Err()
Expect(err).To(MatchError("redis: cluster has no nodes"))
})
@@ -626,7 +635,7 @@ var _ = Describe("ClusterClient without valid nodes", func() {
It("returns an error", func() {
err := client.Ping().Err()
- Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+ Expect(err).To(MatchError("redis: cannot load cluster slots"))
})
It("pipeline returns an error", func() {
@@ -634,7 +643,7 @@ var _ = Describe("ClusterClient without valid nodes", func() {
pipe.Ping()
return nil
})
- Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+ Expect(err).To(MatchError("redis: cannot load cluster slots"))
})
})
@@ -664,7 +673,7 @@ var _ = Describe("ClusterClient timeout", func() {
It("Tx timeouts", func() {
err := client.Watch(func(tx *redis.Tx) error {
return tx.Ping().Err()
- })
+ }, "foo")
Expect(err).To(HaveOccurred())
Expect(err.(net.Error).Timeout()).To(BeTrue())
})
@@ -676,42 +685,20 @@ var _ = Describe("ClusterClient timeout", func() {
return nil
})
return err
- })
+ }, "foo")
Expect(err).To(HaveOccurred())
Expect(err.(net.Error).Timeout()).To(BeTrue())
})
}
- Context("read timeout", func() {
- BeforeEach(func() {
- opt := redisClusterOptions()
- opt.ReadTimeout = time.Nanosecond
- opt.WriteTimeout = -1
- client = cluster.clusterClient(opt)
- })
-
- testTimeout()
- })
-
- Context("write timeout", func() {
- BeforeEach(func() {
- opt := redisClusterOptions()
- opt.ReadTimeout = time.Nanosecond
- opt.WriteTimeout = -1
- client = cluster.clusterClient(opt)
- })
-
- testTimeout()
- })
-
- Context("ClientPause timeout", func() {
- const pause = time.Second
+ const pause = time.Second
+ Context("read/write timeout", func() {
BeforeEach(func() {
opt := redisClusterOptions()
- opt.ReadTimeout = pause / 10
- opt.WriteTimeout = pause / 10
- opt.MaxRedirects = -1
+ opt.ReadTimeout = 100 * time.Millisecond
+ opt.WriteTimeout = 100 * time.Millisecond
+ opt.MaxRedirects = 1
client = cluster.clusterClient(opt)
err := client.ForEachNode(func(client *redis.Client) error {
diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go
index 0e5b2016e..d2688082a 100644
--- a/vendor/github.com/go-redis/redis/command.go
+++ b/vendor/github.com/go-redis/redis/command.go
@@ -12,28 +12,10 @@ import (
"github.com/go-redis/redis/internal/proto"
)
-var (
- _ Cmder = (*Cmd)(nil)
- _ Cmder = (*SliceCmd)(nil)
- _ Cmder = (*StatusCmd)(nil)
- _ Cmder = (*IntCmd)(nil)
- _ Cmder = (*DurationCmd)(nil)
- _ Cmder = (*BoolCmd)(nil)
- _ Cmder = (*StringCmd)(nil)
- _ Cmder = (*FloatCmd)(nil)
- _ Cmder = (*StringSliceCmd)(nil)
- _ Cmder = (*BoolSliceCmd)(nil)
- _ Cmder = (*StringStringMapCmd)(nil)
- _ Cmder = (*StringIntMapCmd)(nil)
- _ Cmder = (*ZSliceCmd)(nil)
- _ Cmder = (*ScanCmd)(nil)
- _ Cmder = (*ClusterSlotsCmd)(nil)
-)
-
type Cmder interface {
- args() []interface{}
- arg(int) string
Name() string
+ Args() []interface{}
+ stringArg(int) string
readReply(*pool.Conn) error
setErr(error)
@@ -46,14 +28,25 @@ type Cmder interface {
func setCmdsErr(cmds []Cmder, e error) {
for _, cmd := range cmds {
- cmd.setErr(e)
+ if cmd.Err() == nil {
+ cmd.setErr(e)
+ }
}
}
+func firstCmdsErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func writeCmd(cn *pool.Conn, cmds ...Cmder) error {
cn.Wb.Reset()
for _, cmd := range cmds {
- if err := cn.Wb.Append(cmd.args()); err != nil {
+ if err := cn.Wb.Append(cmd.Args()); err != nil {
return err
}
}
@@ -64,7 +57,7 @@ func writeCmd(cn *pool.Conn, cmds ...Cmder) error {
func cmdString(cmd Cmder, val interface{}) string {
var ss []string
- for _, arg := range cmd.args() {
+ for _, arg := range cmd.Args() {
ss = append(ss, fmt.Sprint(arg))
}
s := strings.Join(ss, " ")
@@ -86,7 +79,7 @@ func cmdString(cmd Cmder, val interface{}) string {
func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
switch cmd.Name() {
case "eval", "evalsha":
- if cmd.arg(2) != "0" {
+ if cmd.stringArg(2) != "0" {
return 3
} else {
return -1
@@ -95,7 +88,6 @@ func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
return 1
}
if info == nil {
- internal.Logf("info for cmd=%s not found", cmd.Name())
return -1
}
return int(info.FirstKeyPos)
@@ -110,15 +102,17 @@ type baseCmd struct {
_readTimeout *time.Duration
}
+var _ Cmder = (*Cmd)(nil)
+
func (cmd *baseCmd) Err() error {
return cmd.err
}
-func (cmd *baseCmd) args() []interface{} {
+func (cmd *baseCmd) Args() []interface{} {
return cmd._args
}
-func (cmd *baseCmd) arg(pos int) string {
+func (cmd *baseCmd) stringArg(pos int) string {
if pos < 0 || pos >= len(cmd._args) {
return ""
}
@@ -129,7 +123,7 @@ func (cmd *baseCmd) arg(pos int) string {
func (cmd *baseCmd) Name() string {
if len(cmd._args) > 0 {
// Cmd name must be lower cased.
- s := internal.ToLower(cmd.arg(0))
+ s := internal.ToLower(cmd.stringArg(0))
cmd._args[0] = s
return s
}
@@ -194,6 +188,8 @@ type SliceCmd struct {
val []interface{}
}
+var _ Cmder = (*SliceCmd)(nil)
+
func NewSliceCmd(args ...interface{}) *SliceCmd {
return &SliceCmd{
baseCmd: baseCmd{_args: args},
@@ -230,6 +226,8 @@ type StatusCmd struct {
val string
}
+var _ Cmder = (*StatusCmd)(nil)
+
func NewStatusCmd(args ...interface{}) *StatusCmd {
return &StatusCmd{
baseCmd: baseCmd{_args: args},
@@ -261,6 +259,8 @@ type IntCmd struct {
val int64
}
+var _ Cmder = (*IntCmd)(nil)
+
func NewIntCmd(args ...interface{}) *IntCmd {
return &IntCmd{
baseCmd: baseCmd{_args: args},
@@ -293,6 +293,8 @@ type DurationCmd struct {
precision time.Duration
}
+var _ Cmder = (*DurationCmd)(nil)
+
func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
return &DurationCmd{
baseCmd: baseCmd{_args: args},
@@ -330,6 +332,8 @@ type TimeCmd struct {
val time.Time
}
+var _ Cmder = (*TimeCmd)(nil)
+
func NewTimeCmd(args ...interface{}) *TimeCmd {
return &TimeCmd{
baseCmd: baseCmd{_args: args},
@@ -366,6 +370,8 @@ type BoolCmd struct {
val bool
}
+var _ Cmder = (*BoolCmd)(nil)
+
func NewBoolCmd(args ...interface{}) *BoolCmd {
return &BoolCmd{
baseCmd: baseCmd{_args: args},
@@ -421,6 +427,8 @@ type StringCmd struct {
val []byte
}
+var _ Cmder = (*StringCmd)(nil)
+
func NewStringCmd(args ...interface{}) *StringCmd {
return &StringCmd{
baseCmd: baseCmd{_args: args},
@@ -484,6 +492,8 @@ type FloatCmd struct {
val float64
}
+var _ Cmder = (*FloatCmd)(nil)
+
func NewFloatCmd(args ...interface{}) *FloatCmd {
return &FloatCmd{
baseCmd: baseCmd{_args: args},
@@ -515,6 +525,8 @@ type StringSliceCmd struct {
val []string
}
+var _ Cmder = (*StringSliceCmd)(nil)
+
func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
return &StringSliceCmd{
baseCmd: baseCmd{_args: args},
@@ -555,6 +567,8 @@ type BoolSliceCmd struct {
val []bool
}
+var _ Cmder = (*BoolSliceCmd)(nil)
+
func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
return &BoolSliceCmd{
baseCmd: baseCmd{_args: args},
@@ -591,6 +605,8 @@ type StringStringMapCmd struct {
val map[string]string
}
+var _ Cmder = (*StringStringMapCmd)(nil)
+
func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
return &StringStringMapCmd{
baseCmd: baseCmd{_args: args},
@@ -627,6 +643,8 @@ type StringIntMapCmd struct {
val map[string]int64
}
+var _ Cmder = (*StringIntMapCmd)(nil)
+
func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
return &StringIntMapCmd{
baseCmd: baseCmd{_args: args},
@@ -663,6 +681,8 @@ type ZSliceCmd struct {
val []Z
}
+var _ Cmder = (*ZSliceCmd)(nil)
+
func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
return &ZSliceCmd{
baseCmd: baseCmd{_args: args},
@@ -702,6 +722,8 @@ type ScanCmd struct {
process func(cmd Cmder) error
}
+var _ Cmder = (*ScanCmd)(nil)
+
func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
return &ScanCmd{
baseCmd: baseCmd{_args: args},
@@ -752,6 +774,8 @@ type ClusterSlotsCmd struct {
val []ClusterSlot
}
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
return &ClusterSlotsCmd{
baseCmd: baseCmd{_args: args},
@@ -811,6 +835,8 @@ type GeoLocationCmd struct {
locations []GeoLocation
}
+var _ Cmder = (*GeoLocationCmd)(nil)
+
func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
args = append(args, q.Radius)
if q.Unit != "" {
@@ -881,6 +907,8 @@ type GeoPosCmd struct {
positions []*GeoPos
}
+var _ Cmder = (*GeoPosCmd)(nil)
+
func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
return &GeoPosCmd{
baseCmd: baseCmd{_args: args},
@@ -927,6 +955,8 @@ type CommandsInfoCmd struct {
val map[string]*CommandInfo
}
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
return &CommandsInfoCmd{
baseCmd: baseCmd{_args: args},
diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go
index 83b3824f8..a3b90f12d 100644
--- a/vendor/github.com/go-redis/redis/commands.go
+++ b/vendor/github.com/go-redis/redis/commands.go
@@ -11,7 +11,7 @@ func readTimeout(timeout time.Duration) time.Duration {
if timeout == 0 {
return 0
}
- return timeout + time.Second
+ return timeout + 10*time.Second
}
func usePrecise(dur time.Duration) bool {
@@ -42,6 +42,9 @@ type Cmdable interface {
Pipeline() Pipeliner
Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
ClientGetName() *StringCmd
Echo(message interface{}) *StringCmd
Ping() *StatusCmd
diff --git a/vendor/github.com/go-redis/redis/commands_test.go b/vendor/github.com/go-redis/redis/commands_test.go
index 4298cba68..6b81f23cf 100644
--- a/vendor/github.com/go-redis/redis/commands_test.go
+++ b/vendor/github.com/go-redis/redis/commands_test.go
@@ -27,11 +27,21 @@ var _ = Describe("Commands", func() {
Describe("server", func() {
It("should Auth", func() {
- _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
pipe.Auth("password")
+ pipe.Auth("")
return nil
})
Expect(err).To(MatchError("ERR Client sent AUTH, but no password is set"))
+ Expect(cmds[0].Err()).To(MatchError("ERR Client sent AUTH, but no password is set"))
+ Expect(cmds[1].Err()).To(MatchError("ERR Client sent AUTH, but no password is set"))
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(1)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ Expect(stats.TotalConns).To(Equal(uint32(1)))
+ Expect(stats.FreeConns).To(Equal(uint32(1)))
})
It("should Echo", func() {
@@ -187,6 +197,29 @@ var _ = Describe("Commands", func() {
Expect(tm).To(BeTemporally("~", time.Now(), 3*time.Second))
})
+ It("Should Command", func() {
+ cmds, err := client.Command().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cmds)).To(BeNumerically("~", 180, 10))
+
+ cmd := cmds["mget"]
+ Expect(cmd.Name).To(Equal("mget"))
+ Expect(cmd.Arity).To(Equal(int8(-2)))
+ Expect(cmd.Flags).To(ContainElement("readonly"))
+ Expect(cmd.FirstKeyPos).To(Equal(int8(1)))
+ Expect(cmd.LastKeyPos).To(Equal(int8(-1)))
+ Expect(cmd.StepCount).To(Equal(int8(1)))
+
+ cmd = cmds["ping"]
+ Expect(cmd.Name).To(Equal("ping"))
+ Expect(cmd.Arity).To(Equal(int8(-1)))
+ Expect(cmd.Flags).To(ContainElement("stale"))
+ Expect(cmd.Flags).To(ContainElement("fast"))
+ Expect(cmd.FirstKeyPos).To(Equal(int8(0)))
+ Expect(cmd.LastKeyPos).To(Equal(int8(0)))
+ Expect(cmd.StepCount).To(Equal(int8(0)))
+ })
+
})
Describe("debugging", func() {
@@ -1358,8 +1391,8 @@ var _ = Describe("Commands", func() {
Expect(client.Ping().Err()).NotTo(HaveOccurred())
stats := client.PoolStats()
- Expect(stats.Requests).To(Equal(uint32(3)))
Expect(stats.Hits).To(Equal(uint32(1)))
+ Expect(stats.Misses).To(Equal(uint32(2)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
@@ -2887,24 +2920,6 @@ var _ = Describe("Commands", func() {
})
- Describe("Command", func() {
-
- It("returns map of commands", func() {
- cmds, err := client.Command().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(len(cmds)).To(BeNumerically("~", 180, 10))
-
- cmd := cmds["mget"]
- Expect(cmd.Name).To(Equal("mget"))
- Expect(cmd.Arity).To(Equal(int8(-2)))
- Expect(cmd.Flags).To(ContainElement("readonly"))
- Expect(cmd.FirstKeyPos).To(Equal(int8(1)))
- Expect(cmd.LastKeyPos).To(Equal(int8(-1)))
- Expect(cmd.StepCount).To(Equal(int8(1)))
- })
-
- })
-
Describe("Eval", func() {
It("returns keys and values", func() {
diff --git a/vendor/github.com/go-redis/redis/export_test.go b/vendor/github.com/go-redis/redis/export_test.go
index 3b7965d79..bcc18c457 100644
--- a/vendor/github.com/go-redis/redis/export_test.go
+++ b/vendor/github.com/go-redis/redis/export_test.go
@@ -20,8 +20,13 @@ func (c *PubSub) ReceiveMessageTimeout(timeout time.Duration) (*Message, error)
}
func (c *ClusterClient) SlotAddrs(slot int) []string {
+ state, err := c.state()
+ if err != nil {
+ panic(err)
+ }
+
var addrs []string
- for _, n := range c.state().slotNodes(slot) {
+ for _, n := range state.slotNodes(slot) {
addrs = append(addrs, n.Client.getAddr())
}
return addrs
@@ -29,7 +34,12 @@ func (c *ClusterClient) SlotAddrs(slot int) []string {
// SwapSlot swaps a slot's master/slave address for testing MOVED redirects.
func (c *ClusterClient) SwapSlotNodes(slot int) {
- nodes := c.state().slots[slot]
+ state, err := c.state()
+ if err != nil {
+ panic(err)
+ }
+
+ nodes := state.slots[slot]
if len(nodes) == 2 {
nodes[0], nodes[1] = nodes[1], nodes[0]
}
diff --git a/vendor/github.com/go-redis/redis/internal/error.go b/vendor/github.com/go-redis/redis/internal/error.go
index 90f6503a1..0898eeb62 100644
--- a/vendor/github.com/go-redis/redis/internal/error.go
+++ b/vendor/github.com/go-redis/redis/internal/error.go
@@ -12,11 +12,24 @@ type RedisError string
func (e RedisError) Error() string { return string(e) }
-func IsRetryableError(err error) bool {
- return IsNetworkError(err) || err.Error() == "ERR max number of clients reached"
+func IsRetryableError(err error, retryNetError bool) bool {
+ if IsNetworkError(err) {
+ return retryNetError
+ }
+ s := err.Error()
+ if s == "ERR max number of clients reached" {
+ return true
+ }
+ if strings.HasPrefix(s, "LOADING ") {
+ return true
+ }
+ if strings.HasPrefix(s, "CLUSTERDOWN ") {
+ return true
+ }
+ return false
}
-func IsInternalError(err error) bool {
+func IsRedisError(err error) bool {
_, ok := err.(RedisError)
return ok
}
@@ -33,7 +46,7 @@ func IsBadConn(err error, allowTimeout bool) bool {
if err == nil {
return false
}
- if IsInternalError(err) {
+ if IsRedisError(err) {
return false
}
if allowTimeout {
@@ -45,7 +58,7 @@ func IsBadConn(err error, allowTimeout bool) bool {
}
func IsMovedError(err error) (moved bool, ask bool, addr string) {
- if !IsInternalError(err) {
+ if !IsRedisError(err) {
return
}
@@ -69,7 +82,3 @@ func IsMovedError(err error) (moved bool, ask bool, addr string) {
func IsLoadingError(err error) bool {
return strings.HasPrefix(err.Error(), "LOADING ")
}
-
-func IsClusterDownError(err error) bool {
- return strings.HasPrefix(err.Error(), "CLUSTERDOWN ")
-}
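
The error.go hunk above folds the old IsClusterDownError/IsLoadingError checks into IsRetryableError and makes network errors retryable only when the caller opts in. A simplified, self-contained sketch of that classification (not the library's exact implementation, which also treats io.EOF as a network error) is:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// isRetryable approximates the patched IsRetryableError: network errors are
// retryable only when retryNetError is true, while the max-clients error and
// LOADING/CLUSTERDOWN replies are always retryable.
func isRetryable(err error, retryNetError bool) bool {
	if _, ok := err.(net.Error); ok {
		return retryNetError
	}
	s := err.Error()
	if s == "ERR max number of clients reached" {
		return true
	}
	return strings.HasPrefix(s, "LOADING ") || strings.HasPrefix(s, "CLUSTERDOWN ")
}

func main() {
	fmt.Println(isRetryable(fmt.Errorf("LOADING Redis is loading the dataset in memory"), false)) // true
	fmt.Println(isRetryable(fmt.Errorf("ERR unknown command"), true))                             // false
}
```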
diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool.go b/vendor/github.com/go-redis/redis/internal/pool/pool.go
index 25e78aa3c..836ec1045 100644
--- a/vendor/github.com/go-redis/redis/internal/pool/pool.go
+++ b/vendor/github.com/go-redis/redis/internal/pool/pool.go
@@ -23,12 +23,13 @@ var timers = sync.Pool{
// Stats contains pool state information and accumulated stats.
type Stats struct {
- Requests uint32 // number of times a connection was requested by the pool
Hits uint32 // number of times free connection was found in the pool
+ Misses uint32 // number of times free connection was NOT found in the pool
Timeouts uint32 // number of times a wait timeout occurred
- TotalConns uint32 // the number of total connections in the pool
- FreeConns uint32 // the number of free connections in the pool
+ TotalConns uint32 // number of total connections in the pool
+ FreeConns uint32 // number of free connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
}
type Pooler interface {
@@ -150,8 +151,6 @@ func (p *ConnPool) Get() (*Conn, bool, error) {
return nil, false, ErrClosed
}
- atomic.AddUint32(&p.stats.Requests, 1)
-
select {
case p.queue <- struct{}{}:
default:
@@ -189,6 +188,8 @@ func (p *ConnPool) Get() (*Conn, bool, error) {
return cn, false, nil
}
+ atomic.AddUint32(&p.stats.Misses, 1)
+
newcn, err := p.NewConn()
if err != nil {
<-p.queue
@@ -265,11 +266,13 @@ func (p *ConnPool) FreeLen() int {
func (p *ConnPool) Stats() *Stats {
return &Stats{
- Requests: atomic.LoadUint32(&p.stats.Requests),
- Hits: atomic.LoadUint32(&p.stats.Hits),
- Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Misses: atomic.LoadUint32(&p.stats.Misses),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
TotalConns: uint32(p.Len()),
FreeConns: uint32(p.FreeLen()),
+ StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
}
@@ -362,10 +365,6 @@ func (p *ConnPool) reaper(frequency time.Duration) {
internal.Logf("ReapStaleConns failed: %s", err)
continue
}
- s := p.Stats()
- internal.Logf(
- "reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
- n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
- )
+ atomic.AddUint32(&p.stats.StaleConns, uint32(n))
}
}
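
With the pool.go change above, Stats drops the Requests counter in favor of explicit Hits/Misses and adds StaleConns for connections removed by the reaper. A short sketch of reading the new fields through a client (the address is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Hypothetical address; adjust for a local Redis instance.
	client := redis.NewClient(&redis.Options{Addr: ":6379"})
	defer client.Close()

	client.Ping()

	// Requests is gone; Hits+Misses covers what Requests used to count,
	// and StaleConns tracks connections reaped as stale.
	stats := client.PoolStats()
	fmt.Printf("hits=%d misses=%d timeouts=%d total=%d free=%d stale=%d\n",
		stats.Hits, stats.Misses, stats.Timeouts,
		stats.TotalConns, stats.FreeConns, stats.StaleConns)
}
```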
diff --git a/vendor/github.com/go-redis/redis/internal/proto/reader.go b/vendor/github.com/go-redis/redis/internal/proto/reader.go
index 2159cf639..cd94329d8 100644
--- a/vendor/github.com/go-redis/redis/internal/proto/reader.go
+++ b/vendor/github.com/go-redis/redis/internal/proto/reader.go
@@ -63,7 +63,7 @@ func (p *Reader) ReadLine() ([]byte, error) {
return nil, bufio.ErrBufferFull
}
if len(line) == 0 {
- return nil, internal.RedisError("redis: reply is empty")
+ return nil, fmt.Errorf("redis: reply is empty")
}
if isNilReply(line) {
return nil, internal.Nil
diff --git a/vendor/github.com/go-redis/redis/internal/proto/scan.go b/vendor/github.com/go-redis/redis/internal/proto/scan.go
index 3ab40b94f..0431a877d 100644
--- a/vendor/github.com/go-redis/redis/internal/proto/scan.go
+++ b/vendor/github.com/go-redis/redis/internal/proto/scan.go
@@ -11,7 +11,7 @@ import (
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
case nil:
- return internal.RedisError("redis: Scan(nil)")
+ return fmt.Errorf("redis: Scan(nil)")
case *string:
*v = internal.BytesToString(b)
return nil
diff --git a/vendor/github.com/go-redis/redis/main_test.go b/vendor/github.com/go-redis/redis/main_test.go
index 30f09c618..7c5a6a969 100644
--- a/vendor/github.com/go-redis/redis/main_test.go
+++ b/vendor/github.com/go-redis/redis/main_test.go
@@ -50,10 +50,6 @@ var cluster = &clusterScenario{
clients: make(map[string]*redis.Client, 6),
}
-func init() {
- //redis.SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile))
-}
-
var _ = BeforeSuite(func() {
var err error
diff --git a/vendor/github.com/go-redis/redis/options.go b/vendor/github.com/go-redis/redis/options.go
index dea045453..75648053d 100644
--- a/vendor/github.com/go-redis/redis/options.go
+++ b/vendor/github.com/go-redis/redis/options.go
@@ -198,13 +198,3 @@ func newConnPool(opt *Options) *pool.ConnPool {
IdleCheckFrequency: opt.IdleCheckFrequency,
})
}
-
-// PoolStats contains pool state information and accumulated stats.
-type PoolStats struct {
- Requests uint32 // number of times a connection was requested by the pool
- Hits uint32 // number of times free connection was found in the pool
- Timeouts uint32 // number of times a wait timeout occurred
-
- TotalConns uint32 // the number of total connections in the pool
- FreeConns uint32 // the number of free connections in the pool
-}
diff --git a/vendor/github.com/go-redis/redis/pipeline.go b/vendor/github.com/go-redis/redis/pipeline.go
index b66c0597f..9349ef553 100644
--- a/vendor/github.com/go-redis/redis/pipeline.go
+++ b/vendor/github.com/go-redis/redis/pipeline.go
@@ -13,9 +13,7 @@ type Pipeliner interface {
Process(cmd Cmder) error
Close() error
Discard() error
- discard() error
Exec() ([]Cmder, error)
- pipelined(fn func(Pipeliner) error) ([]Cmder, error)
}
var _ Pipeliner = (*Pipeline)(nil)
@@ -104,3 +102,11 @@ func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
func (c *Pipeline) Pipeline() Pipeliner {
return c
}
+
+func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.pipelined(fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
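
The pipeline.go hunk above removes the unexported pipelined/discard methods from the Pipeliner interface and adds TxPipelined/TxPipeline, which the Cmdable interface now also exposes. A minimal usage sketch, with a placeholder address and hypothetical keys:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	// Hypothetical address for illustration only.
	client := redis.NewClient(&redis.Options{Addr: ":6379"})
	defer client.Close()

	// TxPipelined queues the commands and wraps them in MULTI/EXEC on Exec.
	cmds, err := client.TxPipelined(func(pipe redis.Pipeliner) error {
		pipe.Incr("counter")
		pipe.Expire("counter", time.Hour)
		return nil
	})
	if err != nil {
		fmt.Println("tx pipeline failed:", err)
		return
	}
	fmt.Println("executed", len(cmds), "commands atomically")
}
```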
diff --git a/vendor/github.com/go-redis/redis/pool_test.go b/vendor/github.com/go-redis/redis/pool_test.go
index 34a548a63..0ca09adc7 100644
--- a/vendor/github.com/go-redis/redis/pool_test.go
+++ b/vendor/github.com/go-redis/redis/pool_test.go
@@ -95,8 +95,8 @@ var _ = Describe("pool", func() {
Expect(pool.FreeLen()).To(Equal(1))
stats := pool.Stats()
- Expect(stats.Requests).To(Equal(uint32(4)))
Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(2)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
@@ -112,30 +112,32 @@ var _ = Describe("pool", func() {
Expect(pool.FreeLen()).To(Equal(1))
stats := pool.Stats()
- Expect(stats.Requests).To(Equal(uint32(101)))
Expect(stats.Hits).To(Equal(uint32(100)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
It("removes idle connections", func() {
stats := client.PoolStats()
Expect(stats).To(Equal(&redis.PoolStats{
- Requests: 1,
Hits: 0,
+ Misses: 1,
Timeouts: 0,
TotalConns: 1,
FreeConns: 1,
+ StaleConns: 0,
}))
time.Sleep(2 * time.Second)
stats = client.PoolStats()
Expect(stats).To(Equal(&redis.PoolStats{
- Requests: 1,
Hits: 0,
+ Misses: 1,
Timeouts: 0,
TotalConns: 0,
FreeConns: 0,
+ StaleConns: 1,
}))
})
})
diff --git a/vendor/github.com/go-redis/redis/pubsub.go b/vendor/github.com/go-redis/redis/pubsub.go
index 4a5c65f57..e754a16f2 100644
--- a/vendor/github.com/go-redis/redis/pubsub.go
+++ b/vendor/github.com/go-redis/redis/pubsub.go
@@ -95,7 +95,10 @@ func (c *PubSub) releaseConn(cn *pool.Conn, err error) {
}
func (c *PubSub) _releaseConn(cn *pool.Conn, err error) {
- if internal.IsBadConn(err, true) && c.cn == cn {
+ if c.cn != cn {
+ return
+ }
+ if internal.IsBadConn(err, true) {
_ = c.closeTheCn()
}
}
diff --git a/vendor/github.com/go-redis/redis/pubsub_test.go b/vendor/github.com/go-redis/redis/pubsub_test.go
index 1d9dfcb99..6fc04a198 100644
--- a/vendor/github.com/go-redis/redis/pubsub_test.go
+++ b/vendor/github.com/go-redis/redis/pubsub_test.go
@@ -68,7 +68,7 @@ var _ = Describe("PubSub", func() {
}
stats := client.PoolStats()
- Expect(stats.Requests - stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(2)))
})
It("should pub/sub channels", func() {
@@ -191,7 +191,7 @@ var _ = Describe("PubSub", func() {
}
stats := client.PoolStats()
- Expect(stats.Requests - stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(2)))
})
It("should ping/pong", func() {
@@ -290,8 +290,8 @@ var _ = Describe("PubSub", func() {
Eventually(done).Should(Receive())
stats := client.PoolStats()
- Expect(stats.Requests).To(Equal(uint32(2)))
Expect(stats.Hits).To(Equal(uint32(1)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
})
It("returns an error when subscribe fails", func() {
diff --git a/vendor/github.com/go-redis/redis/redis.go b/vendor/github.com/go-redis/redis/redis.go
index b18973cdb..230091b3e 100644
--- a/vendor/github.com/go-redis/redis/redis.go
+++ b/vendor/github.com/go-redis/redis/redis.go
@@ -3,6 +3,7 @@ package redis
import (
"fmt"
"log"
+ "os"
"time"
"github.com/go-redis/redis/internal"
@@ -13,6 +14,10 @@ import (
// Redis nil reply, .e.g. when key does not exist.
const Nil = internal.Nil
+func init() {
+ SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile))
+}
+
func SetLogger(logger *log.Logger) {
internal.Logger = logger
}
@@ -131,7 +136,7 @@ func (c *baseClient) defaultProcess(cmd Cmder) error {
cn, _, err := c.getConn()
if err != nil {
cmd.setErr(err)
- if internal.IsRetryableError(err) {
+ if internal.IsRetryableError(err, true) {
continue
}
return err
@@ -141,7 +146,7 @@ func (c *baseClient) defaultProcess(cmd Cmder) error {
if err := writeCmd(cn, cmd); err != nil {
c.releaseConn(cn, err)
cmd.setErr(err)
- if internal.IsRetryableError(err) {
+ if internal.IsRetryableError(err, true) {
continue
}
return err
@@ -150,7 +155,7 @@ func (c *baseClient) defaultProcess(cmd Cmder) error {
cn.SetReadTimeout(c.cmdTimeout(cmd))
err = cmd.readReply(cn)
c.releaseConn(cn, err)
- if err != nil && internal.IsRetryableError(err) {
+ if err != nil && internal.IsRetryableError(err, cmd.readTimeout() == nil) {
continue
}
@@ -197,8 +202,11 @@ type pipelineProcessor func(*pool.Conn, []Cmder) (bool, error)
func (c *baseClient) pipelineExecer(p pipelineProcessor) pipelineExecer {
return func(cmds []Cmder) error {
- var firstErr error
- for i := 0; i <= c.opt.MaxRetries; i++ {
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
cn, _, err := c.getConn()
if err != nil {
setCmdsErr(cmds, err)
@@ -206,18 +214,18 @@ func (c *baseClient) pipelineExecer(p pipelineProcessor) pipelineExecer {
}
canRetry, err := p(cn, cmds)
- c.releaseConn(cn, err)
- if err == nil {
- return nil
- }
- if firstErr == nil {
- firstErr = err
+
+ if err == nil || internal.IsRedisError(err) {
+ _ = c.connPool.Put(cn)
+ break
}
- if !canRetry || !internal.IsRetryableError(err) {
+ _ = c.connPool.Remove(cn)
+
+ if !canRetry || !internal.IsRetryableError(err, true) {
break
}
}
- return firstErr
+ return firstCmdsErr(cmds)
}
}
@@ -230,23 +238,17 @@ func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, err
// Set read timeout for all commands.
cn.SetReadTimeout(c.opt.ReadTimeout)
- return pipelineReadCmds(cn, cmds)
+ return true, pipelineReadCmds(cn, cmds)
}
-func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) (retry bool, firstErr error) {
- for i, cmd := range cmds {
+func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) error {
+ for _, cmd := range cmds {
err := cmd.readReply(cn)
- if err == nil {
- continue
- }
- if i == 0 {
- retry = true
- }
- if firstErr == nil {
- firstErr = err
+ if err != nil && !internal.IsRedisError(err) {
+ return err
}
}
- return
+ return nil
}
func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) {
@@ -260,11 +262,11 @@ func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, e
cn.SetReadTimeout(c.opt.ReadTimeout)
if err := c.txPipelineReadQueued(cn, cmds); err != nil {
+ setCmdsErr(cmds, err)
return false, err
}
- _, err := pipelineReadCmds(cn, cmds)
- return false, err
+ return false, pipelineReadCmds(cn, cmds)
}
func txPipelineWriteMulti(cn *pool.Conn, cmds []Cmder) error {
@@ -276,21 +278,16 @@ func txPipelineWriteMulti(cn *pool.Conn, cmds []Cmder) error {
}
func (c *baseClient) txPipelineReadQueued(cn *pool.Conn, cmds []Cmder) error {
- var firstErr error
-
// Parse queued replies.
var statusCmd StatusCmd
- if err := statusCmd.readReply(cn); err != nil && firstErr == nil {
- firstErr = err
+ if err := statusCmd.readReply(cn); err != nil {
+ return err
}
- for _, cmd := range cmds {
+ for _ = range cmds {
err := statusCmd.readReply(cn)
- if err != nil {
- cmd.setErr(err)
- if firstErr == nil {
- firstErr = err
- }
+ if err != nil && !internal.IsRedisError(err) {
+ return err
}
}
@@ -355,21 +352,16 @@ func (c *Client) Options() *Options {
return c.opt
}
+type PoolStats pool.Stats
+
// PoolStats returns connection pool stats.
func (c *Client) PoolStats() *PoolStats {
- s := c.connPool.Stats()
- return &PoolStats{
- Requests: s.Requests,
- Hits: s.Hits,
- Timeouts: s.Timeouts,
-
- TotalConns: s.TotalConns,
- FreeConns: s.FreeConns,
- }
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
}
func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().pipelined(fn)
+ return c.Pipeline().Pipelined(fn)
}
func (c *Client) Pipeline() Pipeliner {
@@ -381,7 +373,7 @@ func (c *Client) Pipeline() Pipeliner {
}
func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().pipelined(fn)
+ return c.TxPipeline().Pipelined(fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
@@ -433,7 +425,7 @@ type Conn struct {
}
func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().pipelined(fn)
+ return c.Pipeline().Pipelined(fn)
}
func (c *Conn) Pipeline() Pipeliner {
@@ -445,7 +437,7 @@ func (c *Conn) Pipeline() Pipeliner {
}
func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().pipelined(fn)
+ return c.TxPipeline().Pipelined(fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
diff --git a/vendor/github.com/go-redis/redis/ring.go b/vendor/github.com/go-redis/redis/ring.go
index 72d52bf75..a30c32102 100644
--- a/vendor/github.com/go-redis/redis/ring.go
+++ b/vendor/github.com/go-redis/redis/ring.go
@@ -34,7 +34,9 @@ type RingOptions struct {
DB int
Password string
- MaxRetries int
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
@@ -50,6 +52,19 @@ func (opt *RingOptions) init() {
if opt.HeartbeatFrequency == 0 {
opt.HeartbeatFrequency = 500 * time.Millisecond
}
+
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
}
func (opt *RingOptions) clientOptions() *Options {
@@ -130,9 +145,10 @@ type Ring struct {
opt *RingOptions
nreplicas int
- mu sync.RWMutex
- hash *consistenthash.Map
- shards map[string]*ringShard
+ mu sync.RWMutex
+ hash *consistenthash.Map
+ shards map[string]*ringShard
+ shardsList []*ringShard
cmdsInfoOnce internal.Once
cmdsInfo map[string]*CommandInfo
@@ -154,24 +170,41 @@ func NewRing(opt *RingOptions) *Ring {
for name, addr := range opt.Addrs {
clopt := opt.clientOptions()
clopt.Addr = addr
- ring.addClient(name, NewClient(clopt))
+ ring.addShard(name, NewClient(clopt))
}
go ring.heartbeat()
return ring
}
+func (c *Ring) addShard(name string, cl *Client) {
+ shard := &ringShard{Client: cl}
+ c.mu.Lock()
+ c.hash.Add(name)
+ c.shards[name] = shard
+ c.shardsList = append(c.shardsList, shard)
+ c.mu.Unlock()
+}
+
// Options returns read-only Options that were used to create the client.
func (c *Ring) Options() *RingOptions {
return c.opt
}
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
// PoolStats returns accumulated connection pool stats.
func (c *Ring) PoolStats() *PoolStats {
+ c.mu.RLock()
+ shards := c.shardsList
+ c.mu.RUnlock()
+
var acc PoolStats
- for _, shard := range c.shards {
+ for _, shard := range shards {
s := shard.Client.connPool.Stats()
- acc.Requests += s.Requests
acc.Hits += s.Hits
+ acc.Misses += s.Misses
acc.Timeouts += s.Timeouts
acc.TotalConns += s.TotalConns
acc.FreeConns += s.FreeConns
@@ -210,9 +243,13 @@ func (c *Ring) PSubscribe(channels ...string) *PubSub {
// ForEachShard concurrently calls the fn on each live shard in the ring.
// It returns the first error if any.
func (c *Ring) ForEachShard(fn func(client *Client) error) error {
+ c.mu.RLock()
+ shards := c.shardsList
+ c.mu.RUnlock()
+
var wg sync.WaitGroup
errCh := make(chan error, 1)
- for _, shard := range c.shards {
+ for _, shard := range shards {
if shard.IsDown() {
continue
}
@@ -241,8 +278,12 @@ func (c *Ring) ForEachShard(fn func(client *Client) error) error {
func (c *Ring) cmdInfo(name string) *CommandInfo {
err := c.cmdsInfoOnce.Do(func() error {
+ c.mu.RLock()
+ shards := c.shardsList
+ c.mu.RUnlock()
+
var firstErr error
- for _, shard := range c.shards {
+ for _, shard := range shards {
cmdsInfo, err := shard.Client.Command().Result()
if err == nil {
c.cmdsInfo = cmdsInfo
@@ -257,14 +298,11 @@ func (c *Ring) cmdInfo(name string) *CommandInfo {
if err != nil {
return nil
}
- return c.cmdsInfo[name]
-}
-
-func (c *Ring) addClient(name string, cl *Client) {
- c.mu.Lock()
- c.hash.Add(name)
- c.shards[name] = &ringShard{Client: cl}
- c.mu.Unlock()
+ info := c.cmdsInfo[name]
+ if info == nil {
+ internal.Logf("info for cmd=%s not found", name)
+ }
+ return info
}
func (c *Ring) shardByKey(key string) (*ringShard, error) {
@@ -305,7 +343,7 @@ func (c *Ring) shardByName(name string) (*ringShard, error) {
func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+ firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
return c.shardByKey(firstKey)
}
@@ -346,7 +384,10 @@ func (c *Ring) heartbeat() {
break
}
- for _, shard := range c.shards {
+ shards := c.shardsList
+ c.mu.RUnlock()
+
+ for _, shard := range shards {
err := shard.Client.Ping().Err()
if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
internal.Logf("ring shard state changed: %s", shard)
@@ -354,8 +395,6 @@ func (c *Ring) heartbeat() {
}
}
- c.mu.RUnlock()
-
if rebalance {
c.rebalance()
}
@@ -383,6 +422,7 @@ func (c *Ring) Close() error {
}
c.hash = nil
c.shards = nil
+ c.shardsList = nil
return firstErr
}
@@ -396,51 +436,48 @@ func (c *Ring) Pipeline() Pipeliner {
}
func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().pipelined(fn)
+ return c.Pipeline().Pipelined(fn)
}
-func (c *Ring) pipelineExec(cmds []Cmder) (firstErr error) {
+func (c *Ring) pipelineExec(cmds []Cmder) error {
cmdsMap := make(map[string][]Cmder)
for _, cmd := range cmds {
cmdInfo := c.cmdInfo(cmd.Name())
- name := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+ name := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
if name != "" {
name = c.hash.Get(hashtag.Key(name))
}
cmdsMap[name] = append(cmdsMap[name], cmd)
}
- for i := 0; i <= c.opt.MaxRetries; i++ {
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
var failedCmdsMap map[string][]Cmder
for name, cmds := range cmdsMap {
shard, err := c.shardByName(name)
if err != nil {
setCmdsErr(cmds, err)
- if firstErr == nil {
- firstErr = err
- }
continue
}
cn, _, err := shard.Client.getConn()
if err != nil {
setCmdsErr(cmds, err)
- if firstErr == nil {
- firstErr = err
- }
continue
}
canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds)
- shard.Client.releaseConn(cn, err)
- if err == nil {
+ if err == nil || internal.IsRedisError(err) {
+ _ = shard.Client.connPool.Put(cn)
continue
}
- if firstErr == nil {
- firstErr = err
- }
- if canRetry && internal.IsRetryableError(err) {
+ _ = shard.Client.connPool.Remove(cn)
+
+ if canRetry && internal.IsRetryableError(err, true) {
if failedCmdsMap == nil {
failedCmdsMap = make(map[string][]Cmder)
}
@@ -454,5 +491,13 @@ func (c *Ring) pipelineExec(cmds []Cmder) (firstErr error) {
cmdsMap = failedCmdsMap
}
- return firstErr
+ return firstCmdsErr(cmds)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ panic("not implemented")
+}
+
+func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ panic("not implemented")
}
diff --git a/vendor/github.com/go-redis/redis/sentinel.go b/vendor/github.com/go-redis/redis/sentinel.go
index 3bfdb4a3f..37d06b482 100644
--- a/vendor/github.com/go-redis/redis/sentinel.go
+++ b/vendor/github.com/go-redis/redis/sentinel.go
@@ -301,8 +301,10 @@ func (d *sentinelFailover) listen(sentinel *sentinelClient) {
msg, err := pubsub.ReceiveMessage()
if err != nil {
- internal.Logf("sentinel: ReceiveMessage failed: %s", err)
- pubsub.Close()
+ if err != pool.ErrClosed {
+ internal.Logf("sentinel: ReceiveMessage failed: %s", err)
+ pubsub.Close()
+ }
d.resetSentinel()
return
}
diff --git a/vendor/github.com/go-redis/redis/tx.go b/vendor/github.com/go-redis/redis/tx.go
index 5ef89619b..11d5d5cb0 100644
--- a/vendor/github.com/go-redis/redis/tx.go
+++ b/vendor/github.com/go-redis/redis/tx.go
@@ -36,11 +36,10 @@ func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
return err
}
}
- firstErr := fn(tx)
- if err := tx.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- return firstErr
+
+ err := fn(tx)
+ _ = tx.Close()
+ return err
}
// close closes the transaction, releasing any open resources.
@@ -53,7 +52,7 @@ func (c *Tx) Close() error {
// of a transaction.
func (c *Tx) Watch(keys ...string) *StatusCmd {
args := make([]interface{}, 1+len(keys))
- args[0] = "WATCH"
+ args[0] = "watch"
for i, key := range keys {
args[1+i] = key
}
@@ -65,7 +64,7 @@ func (c *Tx) Watch(keys ...string) *StatusCmd {
// Unwatch flushes all the previously watched keys for a transaction.
func (c *Tx) Unwatch(keys ...string) *StatusCmd {
args := make([]interface{}, 1+len(keys))
- args[0] = "UNWATCH"
+ args[0] = "unwatch"
for i, key := range keys {
args[1+i] = key
}
@@ -92,5 +91,13 @@ func (c *Tx) Pipeline() Pipeliner {
// TxFailedErr is returned. Otherwise Exec returns error of the first
// failed command or nil.
func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().pipelined(fn)
+ return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(fn)
+}
+
+func (c *Tx) TxPipeline() Pipeliner {
+ return c.Pipeline()
}
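
The tx.go hunk above simplifies Client.Watch error handling and lowercases the WATCH/UNWATCH command names; the cluster tests earlier in this diff now also pass an explicit watched key ("foo") to Watch. A short optimistic-locking sketch under those semantics, with a placeholder address and a hypothetical key:

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Hypothetical address; "foo" is the watched key.
	client := redis.NewClient(&redis.Options{Addr: ":6379"})
	defer client.Close()

	err := client.Watch(func(tx *redis.Tx) error {
		n, err := tx.Get("foo").Int64()
		if err != nil && err != redis.Nil {
			return err
		}
		// Queue the write in a MULTI/EXEC block; EXEC fails if "foo" changed.
		_, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
			pipe.Set("foo", n+1, 0)
			return nil
		})
		return err
	}, "foo")
	if err != nil {
		fmt.Println("watch failed:", err)
	}
}
```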
diff --git a/vendor/github.com/go-redis/redis/universal.go b/vendor/github.com/go-redis/redis/universal.go
index 4aa579fa4..29eb12b18 100644
--- a/vendor/github.com/go-redis/redis/universal.go
+++ b/vendor/github.com/go-redis/redis/universal.go
@@ -90,8 +90,8 @@ func (o *UniversalOptions) simple() *Options {
}
return &Options{
- Addr: addr,
- DB: o.DB,
+ Addr: addr,
+ DB: o.DB,
MaxRetries: o.MaxRetries,
Password: o.Password,
@@ -117,6 +117,9 @@ type UniversalClient interface {
Close() error
}
+var _ UniversalClient = (*Client)(nil)
+var _ UniversalClient = (*ClusterClient)(nil)
+
// NewUniversalClient returns a new multi client. The type of client returned depends
// on the following three conditions:
//
diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml
index 24e22f85a..93c67805b 100644
--- a/vendor/github.com/golang/protobuf/.travis.yml
+++ b/vendor/github.com/golang/protobuf/.travis.yml
@@ -4,6 +4,7 @@ go:
- 1.6.x
- 1.7.x
- 1.8.x
+- 1.9.x
install:
- go get -v -d -t github.com/golang/protobuf/...
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
index 795f53f6f..207eb6b48 100644
--- a/vendor/github.com/golang/protobuf/README.md
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -111,6 +111,7 @@ When the .proto file specifies `syntax="proto3"`, there are some differences:
Consider file test.proto, containing
```proto
+ syntax = "proto2";
package example;
enum FOO { X = 17; };
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
index 29bca020f..110ae1384 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -73,6 +73,31 @@ type Marshaler struct {
// Whether to use the original (.proto) name for fields.
OrigName bool
+
+ // A custom URL resolver to use when marshaling Any messages to JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+ // Only the part of typeUrl after the last slash is relevant.
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+ mt := proto.MessageType(mname)
+ if mt == nil {
+ return nil, fmt.Errorf("unknown message type %q", mname)
+ }
+ return reflect.New(mt.Elem()).Interface().(proto.Message), nil
}
// JSONPBMarshaler is implemented by protobuf messages that customize the
@@ -344,16 +369,17 @@ func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string)
turl := v.Field(0).String()
val := v.Field(1).Bytes()
- // Only the part of type_url after the last slash is relevant.
- mname := turl
- if slash := strings.LastIndex(mname, "/"); slash >= 0 {
- mname = mname[slash+1:]
+ var msg proto.Message
+ var err error
+ if m.AnyResolver != nil {
+ msg, err = m.AnyResolver.Resolve(turl)
+ } else {
+ msg, err = defaultResolveAny(turl)
}
- mt := proto.MessageType(mname)
- if mt == nil {
- return fmt.Errorf("unknown message type %q", mname)
+ if err != nil {
+ return err
}
- msg := reflect.New(mt.Elem()).Interface().(proto.Message)
+
if err := proto.Unmarshal(val, msg); err != nil {
return err
}
@@ -590,6 +616,12 @@ type Unmarshaler struct {
// Whether to allow messages to contain unknown fields, as opposed to
// failing to unmarshal.
AllowUnknownFields bool
+
+ // A custom URL resolver to use when unmarshaling Any messages from JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
}
// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
@@ -641,7 +673,8 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe
if targetType.Kind() == reflect.Ptr {
// If input value is "null" and target is a pointer type, then the field should be treated as not set
// UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
- if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) {
+ _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+ if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
return nil
}
target.Set(reflect.New(targetType.Elem()))
@@ -679,16 +712,17 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe
}
target.Field(0).SetString(turl)
- mname := turl
- if slash := strings.LastIndex(mname, "/"); slash >= 0 {
- mname = mname[slash+1:]
+ var m proto.Message
+ var err error
+ if u.AnyResolver != nil {
+ m, err = u.AnyResolver.Resolve(turl)
+ } else {
+ m, err = defaultResolveAny(turl)
}
- mt := proto.MessageType(mname)
- if mt == nil {
- return fmt.Errorf("unknown message type %q", mname)
+ if err != nil {
+ return err
}
- m := reflect.New(mt.Elem()).Interface().(proto.Message)
if _, ok := m.(wkt); ok {
val, ok := jsonFields["value"]
if !ok {
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
index 254caa6c4..2428d0566 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
@@ -721,6 +721,65 @@ func TestUnmarshalingBadInput(t *testing.T) {
}
}
+type funcResolver func(turl string) (proto.Message, error)
+
+func (fn funcResolver) Resolve(turl string) (proto.Message, error) {
+ return fn(turl)
+}
+
+func TestAnyWithCustomResolver(t *testing.T) {
+ var resolvedTypeUrls []string
+ resolver := funcResolver(func(turl string) (proto.Message, error) {
+ resolvedTypeUrls = append(resolvedTypeUrls, turl)
+ return new(pb.Simple), nil
+ })
+ msg := &pb.Simple{
+ OBytes: []byte{1, 2, 3, 4},
+ OBool: proto.Bool(true),
+ OString: proto.String("foobar"),
+ OInt64: proto.Int64(1020304),
+ }
+ msgBytes, err := proto.Marshal(msg)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshaling message: %v", err)
+ }
+ // make an Any with a type URL that won't resolve w/out custom resolver
+ any := &anypb.Any{
+ TypeUrl: "https://foobar.com/some.random.MessageKind",
+ Value: msgBytes,
+ }
+
+ m := Marshaler{AnyResolver: resolver}
+ js, err := m.MarshalToString(any)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err)
+ }
+ if len(resolvedTypeUrls) != 1 {
+ t.Errorf("custom resolver was not invoked during marshaling")
+ } else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" {
+ t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], "https://foobar.com/some.random.MessageKind")
+ }
+ wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}`
+ if js != wanted {
+ t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted)
+ }
+
+ u := Unmarshaler{AnyResolver: resolver}
+ roundTrip := &anypb.Any{}
+ err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err)
+ }
+ if len(resolvedTypeUrls) != 2 {
+ t.Errorf("custom resolver was not invoked during marshaling")
+ } else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" {
+ t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind")
+ }
+ if !proto.Equal(any, roundTrip) {
+ t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any)
+ }
+}
+
func TestUnmarshalJSONPBUnmarshaler(t *testing.T) {
rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }`
var msg dynamicMessage
@@ -732,6 +791,19 @@ func TestUnmarshalJSONPBUnmarshaler(t *testing.T) {
}
}
+func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) {
+ rawJson := `{"stringField":null}`
+ var ptrFieldMsg ptrFieldMessage
+ if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil {
+ t.Errorf("unmarshal error: %v", err)
+ }
+
+ want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}}
+ if !proto.Equal(&ptrFieldMsg, &want) {
+ t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want)
+ }
+}
+
func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) {
rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }`
var got anypb.Any
@@ -762,6 +834,41 @@ func init() {
proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName)
}
+type ptrFieldMessage struct {
+ StringField *stringField `protobuf:"bytes,1,opt,name=stringField"`
+}
+
+func (m *ptrFieldMessage) Reset() {
+}
+
+func (m *ptrFieldMessage) String() string {
+ return m.StringField.StringValue
+}
+
+func (m *ptrFieldMessage) ProtoMessage() {
+}
+
+type stringField struct {
+ IsSet bool `protobuf:"varint,1,opt,name=isSet"`
+ StringValue string `protobuf:"bytes,2,opt,name=stringValue"`
+}
+
+func (s *stringField) Reset() {
+}
+
+func (s *stringField) String() string {
+ return s.StringValue
+}
+
+func (s *stringField) ProtoMessage() {
+}
+
+func (s *stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error {
+ s.IsSet = true
+ s.StringValue = string(js)
+ return nil
+}
+
// dynamicMessage implements protobuf.Message but is not a normal generated message type.
// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support.
type dynamicMessage struct {
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
index 1d92cb272..c6a91bcab 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -11,6 +11,7 @@ It has these top-level messages:
FileDescriptorSet
FileDescriptorProto
DescriptorProto
+ ExtensionRangeOptions
FieldDescriptorProto
OneofDescriptorProto
EnumDescriptorProto
@@ -137,7 +138,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
*x = FieldDescriptorProto_Type(value)
return nil
}
-func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} }
type FieldDescriptorProto_Label int32
@@ -176,7 +177,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
return nil
}
func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{3, 1}
+ return fileDescriptor0, []int{4, 1}
}
// Generated classes can be optimized for speed or code size.
@@ -216,7 +217,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
*x = FileOptions_OptimizeMode(value)
return nil
}
-func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
type FieldOptions_CType int32
@@ -254,7 +255,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_CType(value)
return nil
}
-func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} }
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} }
type FieldOptions_JSType int32
@@ -294,7 +295,7 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_JSType(value)
return nil
}
-func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} }
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} }
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP based RPC implementation may choose GET verb for safe
@@ -335,7 +336,7 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
return nil
}
func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{16, 0}
+ return fileDescriptor0, []int{17, 0}
}
// The protocol compiler can output a FileDescriptorSet containing the .proto
@@ -567,9 +568,10 @@ func (m *DescriptorProto) GetReservedName() []string {
}
type DescriptorProto_ExtensionRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
}
func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
@@ -593,6 +595,13 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
return 0
}
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
// Range of reserved tag numbers. Reserved tag numbers may not be used by
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
@@ -623,6 +632,33 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
return 0
}
+type ExtensionRangeOptions struct {
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage() {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
// Describes a field within a message.
type FieldDescriptorProto struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
@@ -661,7 +697,7 @@ type FieldDescriptorProto struct {
func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FieldDescriptorProto) ProtoMessage() {}
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *FieldDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -743,7 +779,7 @@ type OneofDescriptorProto struct {
func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*OneofDescriptorProto) ProtoMessage() {}
-func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *OneofDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -770,7 +806,7 @@ type EnumDescriptorProto struct {
func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumDescriptorProto) ProtoMessage() {}
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *EnumDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -804,7 +840,7 @@ type EnumValueDescriptorProto struct {
func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumValueDescriptorProto) ProtoMessage() {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *EnumValueDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -838,7 +874,7 @@ type ServiceDescriptorProto struct {
func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*ServiceDescriptorProto) ProtoMessage() {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *ServiceDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -879,7 +915,7 @@ type MethodDescriptorProto struct {
func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*MethodDescriptorProto) ProtoMessage() {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
const Default_MethodDescriptorProto_ClientStreaming bool = false
const Default_MethodDescriptorProto_ServerStreaming bool = false
@@ -974,7 +1010,7 @@ type FileOptions struct {
CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
- PhpGenericServices *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+ PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
@@ -1009,7 +1045,7 @@ type FileOptions struct {
func (m *FileOptions) Reset() { *m = FileOptions{} }
func (m *FileOptions) String() string { return proto.CompactTextString(m) }
func (*FileOptions) ProtoMessage() {}
-func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
var extRange_FileOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1222,7 +1258,7 @@ type MessageOptions struct {
func (m *MessageOptions) Reset() { *m = MessageOptions{} }
func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
func (*MessageOptions) ProtoMessage() {}
-func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
var extRange_MessageOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1285,13 +1321,15 @@ type FieldOptions struct {
Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
- // represented as JavaScript strings. This avoids loss of precision that can
- // happen when a large value is converted to a floating point JavaScript
- // numbers. Specifying JS_NUMBER for the jstype causes the generated
- // JavaScript code to use the JavaScript "number" type instead of strings.
- // This option is an enum to permit additional types to be added,
- // e.g. goog.math.Integer.
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
// Should this field be parsed lazily? Lazy applies only to message-type
// fields. It means that when the outer message is initially parsed, the
@@ -1338,7 +1376,7 @@ type FieldOptions struct {
func (m *FieldOptions) Reset() { *m = FieldOptions{} }
func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
func (*FieldOptions) ProtoMessage() {}
-func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
var extRange_FieldOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1413,7 +1451,7 @@ type OneofOptions struct {
func (m *OneofOptions) Reset() { *m = OneofOptions{} }
func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
func (*OneofOptions) ProtoMessage() {}
-func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
var extRange_OneofOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1448,7 +1486,7 @@ type EnumOptions struct {
func (m *EnumOptions) Reset() { *m = EnumOptions{} }
func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
func (*EnumOptions) ProtoMessage() {}
-func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
var extRange_EnumOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1496,7 +1534,7 @@ type EnumValueOptions struct {
func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
func (*EnumValueOptions) ProtoMessage() {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
var extRange_EnumValueOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1537,7 +1575,7 @@ type ServiceOptions struct {
func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
func (*ServiceOptions) ProtoMessage() {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
var extRange_ServiceOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1579,7 +1617,7 @@ type MethodOptions struct {
func (m *MethodOptions) Reset() { *m = MethodOptions{} }
func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
func (*MethodOptions) ProtoMessage() {}
-func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
var extRange_MethodOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1635,7 +1673,7 @@ type UninterpretedOption struct {
func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption) ProtoMessage() {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
if m != nil {
@@ -1701,7 +1739,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio
func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{17, 0}
+ return fileDescriptor0, []int{18, 0}
}
func (m *UninterpretedOption_NamePart) GetNamePart() string {
@@ -1771,7 +1809,7 @@ type SourceCodeInfo struct {
func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo) ProtoMessage() {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
if m != nil {
@@ -1867,7 +1905,7 @@ type SourceCodeInfo_Location struct {
func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo_Location) ProtoMessage() {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} }
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
func (m *SourceCodeInfo_Location) GetPath() []int32 {
if m != nil {
@@ -1917,7 +1955,7 @@ type GeneratedCodeInfo struct {
func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
func (*GeneratedCodeInfo) ProtoMessage() {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
if m != nil {
@@ -1946,7 +1984,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_
func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{19, 0}
+ return fileDescriptor0, []int{20, 0}
}
func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
@@ -1983,6 +2021,7 @@ func init() {
proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+ proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
@@ -2014,161 +2053,163 @@ func init() {
func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 2490 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
- 0x15, 0x8e, 0x7e, 0x57, 0x3a, 0xd2, 0x6a, 0x67, 0x67, 0x37, 0x36, 0xbd, 0xf9, 0xf1, 0x5a, 0xf9,
- 0xf1, 0x3a, 0x69, 0xb4, 0xc1, 0xc6, 0x76, 0x9c, 0x4d, 0xe1, 0x42, 0x2b, 0xd1, 0x1b, 0xb9, 0x5a,
- 0x49, 0xa5, 0xb4, 0x8d, 0x9d, 0x1b, 0x62, 0x96, 0x1c, 0x49, 0xb4, 0x29, 0x92, 0x21, 0x29, 0xdb,
- 0x9b, 0x2b, 0x03, 0xbd, 0x2a, 0xd0, 0x07, 0x28, 0x8a, 0xa2, 0x17, 0xb9, 0x09, 0xd0, 0x07, 0x28,
- 0xd0, 0xbb, 0x3e, 0x41, 0x81, 0xbc, 0x41, 0x51, 0x14, 0x68, 0xdf, 0xa0, 0xb7, 0xc5, 0xcc, 0x90,
- 0x14, 0xa9, 0x1f, 0x7b, 0x1b, 0xc0, 0xc9, 0x95, 0x34, 0xdf, 0xf9, 0xce, 0x99, 0x33, 0x67, 0xce,
- 0xcc, 0x9c, 0x19, 0xc2, 0xee, 0xc8, 0xb6, 0x47, 0x26, 0xdd, 0x77, 0x5c, 0xdb, 0xb7, 0xcf, 0xa6,
- 0xc3, 0x7d, 0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0xd6, 0x38, 0x86, 0x37, 0x04, 0xa3, 0x16,
- 0x32, 0xaa, 0x27, 0xb0, 0x79, 0xcf, 0x30, 0x69, 0x33, 0x22, 0xf6, 0xa9, 0x8f, 0xef, 0x40, 0x76,
- 0x68, 0x98, 0x54, 0x4a, 0xed, 0x66, 0xf6, 0x4a, 0x07, 0xef, 0xd6, 0xe6, 0x94, 0x6a, 0x49, 0x8d,
- 0x1e, 0x83, 0x15, 0xae, 0x51, 0xfd, 0x57, 0x16, 0xb6, 0x96, 0x48, 0x31, 0x86, 0xac, 0x45, 0x26,
- 0xcc, 0x62, 0x6a, 0xaf, 0xa8, 0xf0, 0xff, 0x58, 0x82, 0x35, 0x87, 0x68, 0x8f, 0xc9, 0x88, 0x4a,
- 0x69, 0x0e, 0x87, 0x4d, 0xfc, 0x36, 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0xce, 0xa5, 0xcc,
- 0x6e, 0x66, 0xaf, 0xa8, 0xc4, 0x10, 0xfc, 0x21, 0x6c, 0x3a, 0xd3, 0x33, 0xd3, 0xd0, 0xd4, 0x18,
- 0x0d, 0x76, 0x33, 0x7b, 0x39, 0x05, 0x09, 0x41, 0x73, 0x46, 0xbe, 0x0e, 0x1b, 0x4f, 0x29, 0x79,
- 0x1c, 0xa7, 0x96, 0x38, 0xb5, 0xc2, 0xe0, 0x18, 0xb1, 0x01, 0xe5, 0x09, 0xf5, 0x3c, 0x32, 0xa2,
- 0xaa, 0x7f, 0xee, 0x50, 0x29, 0xcb, 0x47, 0xbf, 0xbb, 0x30, 0xfa, 0xf9, 0x91, 0x97, 0x02, 0xad,
- 0xc1, 0xb9, 0x43, 0x71, 0x1d, 0x8a, 0xd4, 0x9a, 0x4e, 0x84, 0x85, 0xdc, 0x8a, 0xf8, 0xc9, 0xd6,
- 0x74, 0x32, 0x6f, 0xa5, 0xc0, 0xd4, 0x02, 0x13, 0x6b, 0x1e, 0x75, 0x9f, 0x18, 0x1a, 0x95, 0xf2,
- 0xdc, 0xc0, 0xf5, 0x05, 0x03, 0x7d, 0x21, 0x9f, 0xb7, 0x11, 0xea, 0xe1, 0x06, 0x14, 0xe9, 0x33,
- 0x9f, 0x5a, 0x9e, 0x61, 0x5b, 0xd2, 0x1a, 0x37, 0xf2, 0xde, 0x92, 0x59, 0xa4, 0xa6, 0x3e, 0x6f,
- 0x62, 0xa6, 0x87, 0x6f, 0xc3, 0x9a, 0xed, 0xf8, 0x86, 0x6d, 0x79, 0x52, 0x61, 0x37, 0xb5, 0x57,
- 0x3a, 0x78, 0x73, 0x69, 0x22, 0x74, 0x05, 0x47, 0x09, 0xc9, 0xb8, 0x05, 0xc8, 0xb3, 0xa7, 0xae,
- 0x46, 0x55, 0xcd, 0xd6, 0xa9, 0x6a, 0x58, 0x43, 0x5b, 0x2a, 0x72, 0x03, 0x57, 0x17, 0x07, 0xc2,
- 0x89, 0x0d, 0x5b, 0xa7, 0x2d, 0x6b, 0x68, 0x2b, 0x15, 0x2f, 0xd1, 0xc6, 0x97, 0x20, 0xef, 0x9d,
- 0x5b, 0x3e, 0x79, 0x26, 0x95, 0x79, 0x86, 0x04, 0xad, 0xea, 0x7f, 0x73, 0xb0, 0x71, 0x91, 0x14,
- 0xfb, 0x1c, 0x72, 0x43, 0x36, 0x4a, 0x29, 0xfd, 0xff, 0xc4, 0x40, 0xe8, 0x24, 0x83, 0x98, 0xff,
- 0x81, 0x41, 0xac, 0x43, 0xc9, 0xa2, 0x9e, 0x4f, 0x75, 0x91, 0x11, 0x99, 0x0b, 0xe6, 0x14, 0x08,
- 0xa5, 0xc5, 0x94, 0xca, 0xfe, 0xa0, 0x94, 0x7a, 0x00, 0x1b, 0x91, 0x4b, 0xaa, 0x4b, 0xac, 0x51,
- 0x98, 0x9b, 0xfb, 0x2f, 0xf3, 0xa4, 0x26, 0x87, 0x7a, 0x0a, 0x53, 0x53, 0x2a, 0x34, 0xd1, 0xc6,
- 0x4d, 0x00, 0xdb, 0xa2, 0xf6, 0x50, 0xd5, 0xa9, 0x66, 0x4a, 0x85, 0x15, 0x51, 0xea, 0x32, 0xca,
- 0x42, 0x94, 0x6c, 0x81, 0x6a, 0x26, 0xfe, 0x6c, 0x96, 0x6a, 0x6b, 0x2b, 0x32, 0xe5, 0x44, 0x2c,
- 0xb2, 0x85, 0x6c, 0x3b, 0x85, 0x8a, 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x18, 0x59, 0x91, 0x3b, 0x51,
- 0x7b, 0xe9, 0xc8, 0x94, 0x40, 0x4d, 0x0c, 0x6c, 0xdd, 0x8d, 0x37, 0xf1, 0x3b, 0x10, 0x01, 0x2a,
- 0x4f, 0x2b, 0xe0, 0xbb, 0x50, 0x39, 0x04, 0x3b, 0x64, 0x42, 0x77, 0xee, 0x40, 0x25, 0x19, 0x1e,
- 0xbc, 0x0d, 0x39, 0xcf, 0x27, 0xae, 0xcf, 0xb3, 0x30, 0xa7, 0x88, 0x06, 0x46, 0x90, 0xa1, 0x96,
- 0xce, 0x77, 0xb9, 0x9c, 0xc2, 0xfe, 0xee, 0x7c, 0x0a, 0xeb, 0x89, 0xee, 0x2f, 0xaa, 0x58, 0xfd,
- 0x7d, 0x1e, 0xb6, 0x97, 0xe5, 0xdc, 0xd2, 0xf4, 0xbf, 0x04, 0x79, 0x6b, 0x3a, 0x39, 0xa3, 0xae,
- 0x94, 0xe1, 0x16, 0x82, 0x16, 0xae, 0x43, 0xce, 0x24, 0x67, 0xd4, 0x94, 0xb2, 0xbb, 0xa9, 0xbd,
- 0xca, 0xc1, 0x87, 0x17, 0xca, 0xea, 0x5a, 0x9b, 0xa9, 0x28, 0x42, 0x13, 0xdf, 0x85, 0x6c, 0xb0,
- 0xc5, 0x31, 0x0b, 0x1f, 0x5c, 0xcc, 0x02, 0xcb, 0x45, 0x85, 0xeb, 0xe1, 0x37, 0xa0, 0xc8, 0x7e,
- 0x45, 0x6c, 0xf3, 0xdc, 0xe7, 0x02, 0x03, 0x58, 0x5c, 0xf1, 0x0e, 0x14, 0x78, 0x9a, 0xe9, 0x34,
- 0x3c, 0x1a, 0xa2, 0x36, 0x9b, 0x18, 0x9d, 0x0e, 0xc9, 0xd4, 0xf4, 0xd5, 0x27, 0xc4, 0x9c, 0x52,
- 0x9e, 0x30, 0x45, 0xa5, 0x1c, 0x80, 0xbf, 0x66, 0x18, 0xbe, 0x0a, 0x25, 0x91, 0x95, 0x86, 0xa5,
- 0xd3, 0x67, 0x7c, 0xf7, 0xc9, 0x29, 0x22, 0x51, 0x5b, 0x0c, 0x61, 0xdd, 0x3f, 0xf2, 0x6c, 0x2b,
- 0x9c, 0x5a, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x9f, 0xce, 0x6f, 0x7c, 0x6f, 0x2d, 0x1f, 0xde, 0x7c,
- 0x2e, 0x56, 0xff, 0x92, 0x86, 0x2c, 0x5f, 0x6f, 0x1b, 0x50, 0x1a, 0x3c, 0xec, 0xc9, 0x6a, 0xb3,
- 0x7b, 0x7a, 0xd4, 0x96, 0x51, 0x0a, 0x57, 0x00, 0x38, 0x70, 0xaf, 0xdd, 0xad, 0x0f, 0x50, 0x3a,
- 0x6a, 0xb7, 0x3a, 0x83, 0xdb, 0x37, 0x51, 0x26, 0x52, 0x38, 0x15, 0x40, 0x36, 0x4e, 0xf8, 0xe4,
- 0x00, 0xe5, 0x30, 0x82, 0xb2, 0x30, 0xd0, 0x7a, 0x20, 0x37, 0x6f, 0xdf, 0x44, 0xf9, 0x24, 0xf2,
- 0xc9, 0x01, 0x5a, 0xc3, 0xeb, 0x50, 0xe4, 0xc8, 0x51, 0xb7, 0xdb, 0x46, 0x85, 0xc8, 0x66, 0x7f,
- 0xa0, 0xb4, 0x3a, 0xc7, 0xa8, 0x18, 0xd9, 0x3c, 0x56, 0xba, 0xa7, 0x3d, 0x04, 0x91, 0x85, 0x13,
- 0xb9, 0xdf, 0xaf, 0x1f, 0xcb, 0xa8, 0x14, 0x31, 0x8e, 0x1e, 0x0e, 0xe4, 0x3e, 0x2a, 0x27, 0xdc,
- 0xfa, 0xe4, 0x00, 0xad, 0x47, 0x5d, 0xc8, 0x9d, 0xd3, 0x13, 0x54, 0xc1, 0x9b, 0xb0, 0x2e, 0xba,
- 0x08, 0x9d, 0xd8, 0x98, 0x83, 0x6e, 0xdf, 0x44, 0x68, 0xe6, 0x88, 0xb0, 0xb2, 0x99, 0x00, 0x6e,
- 0xdf, 0x44, 0xb8, 0xda, 0x80, 0x1c, 0xcf, 0x2e, 0x8c, 0xa1, 0xd2, 0xae, 0x1f, 0xc9, 0x6d, 0xb5,
- 0xdb, 0x1b, 0xb4, 0xba, 0x9d, 0x7a, 0x1b, 0xa5, 0x66, 0x98, 0x22, 0xff, 0xea, 0xb4, 0xa5, 0xc8,
- 0x4d, 0x94, 0x8e, 0x63, 0x3d, 0xb9, 0x3e, 0x90, 0x9b, 0x28, 0x53, 0xd5, 0x60, 0x7b, 0xd9, 0x3e,
- 0xb3, 0x74, 0x65, 0xc4, 0xa6, 0x38, 0xbd, 0x62, 0x8a, 0xb9, 0xad, 0x85, 0x29, 0xfe, 0x36, 0x05,
- 0x5b, 0x4b, 0xf6, 0xda, 0xa5, 0x9d, 0xfc, 0x02, 0x72, 0x22, 0x45, 0xc5, 0xe9, 0x73, 0x63, 0xe9,
- 0xa6, 0xcd, 0x13, 0x76, 0xe1, 0x04, 0xe2, 0x7a, 0xf1, 0x13, 0x38, 0xb3, 0xe2, 0x04, 0x66, 0x26,
- 0x16, 0x9c, 0xfc, 0x4d, 0x0a, 0xa4, 0x55, 0xb6, 0x5f, 0xb2, 0x51, 0xa4, 0x13, 0x1b, 0xc5, 0xe7,
- 0xf3, 0x0e, 0x5c, 0x5b, 0x3d, 0x86, 0x05, 0x2f, 0xbe, 0x4b, 0xc1, 0xa5, 0xe5, 0x85, 0xca, 0x52,
- 0x1f, 0xee, 0x42, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x1e, 0xd6, 0xef, 0x2f, 0x39, 0x02, 0x98, 0x78,
- 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xc9, 0xac, 0xaa, 0x36, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x36,
- 0x0d, 0xaf, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xb7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xe2, 0x40, 0x16,
+ // 2519 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7,
+ 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63,
+ 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec,
+ 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad,
+ 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50,
+ 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb,
+ 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33,
+ 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d,
+ 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90,
+ 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43,
+ 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4,
+ 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61,
+ 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a,
+ 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76,
+ 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68,
+ 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3,
+ 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55,
+ 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d,
+ 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6,
+ 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7,
+ 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa,
+ 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13,
+ 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2,
+ 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35,
+ 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e,
+ 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2,
+ 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec,
+ 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07,
+ 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94,
+ 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2,
+ 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e,
+ 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16,
+ 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91,
+ 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda,
+ 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79,
+ 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1,
+ 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9,
+ 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67,
+ 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b,
+ 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65,
+ 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba,
+ 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e,
+ 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48,
+ 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c,
+ 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1,
+ 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91,
+ 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78,
+ 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c,
+ 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e,
+ 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61,
+ 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73,
+ 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76,
+ 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47,
+ 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f,
+ 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc,
+ 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f,
+ 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54,
+ 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e,
+ 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d,
+ 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7,
+ 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda,
+ 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4,
+ 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f,
+ 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82,
+ 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4,
+ 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13,
+ 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3,
+ 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9,
+ 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78,
+ 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e,
+ 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10,
0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80,
- 0x38, 0xe1, 0xce, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xed, 0x15, 0x23, 0x5d, 0x38, 0xeb, 0x3e, 0x06,
+ 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03,
0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1,
- 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0x4f, 0x19, 0x37,
- 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0x6b, 0x01, 0x4a, 0xb1, 0xb2, 0x0e, 0x5f,
- 0x83, 0xf2, 0x23, 0xf2, 0x84, 0xa8, 0x61, 0xa9, 0x2e, 0x22, 0x51, 0x62, 0x58, 0x2f, 0x28, 0xd7,
- 0x3f, 0x86, 0x6d, 0x4e, 0xb1, 0xa7, 0x3e, 0x75, 0x55, 0xcd, 0x24, 0x9e, 0xc7, 0x83, 0x56, 0xe0,
- 0x54, 0xcc, 0x64, 0x5d, 0x26, 0x6a, 0x84, 0x12, 0x7c, 0x0b, 0xb6, 0xb8, 0xc6, 0x64, 0x6a, 0xfa,
- 0x86, 0x63, 0x52, 0x95, 0x5d, 0x1e, 0x3c, 0xbe, 0x11, 0x47, 0x9e, 0x6d, 0x32, 0xc6, 0x49, 0x40,
- 0x60, 0x1e, 0x79, 0xb8, 0x09, 0x6f, 0x71, 0xb5, 0x11, 0xb5, 0xa8, 0x4b, 0x7c, 0xaa, 0xd2, 0xaf,
- 0xa7, 0xc4, 0xf4, 0x54, 0x62, 0xe9, 0xea, 0x98, 0x78, 0x63, 0x69, 0x9b, 0x19, 0x38, 0x4a, 0x4b,
- 0x29, 0xe5, 0x0a, 0x23, 0x1e, 0x07, 0x3c, 0x99, 0xd3, 0xea, 0x96, 0xfe, 0x05, 0xf1, 0xc6, 0xf8,
- 0x10, 0x2e, 0x71, 0x2b, 0x9e, 0xef, 0x1a, 0xd6, 0x48, 0xd5, 0xc6, 0x54, 0x7b, 0xac, 0x4e, 0xfd,
- 0xe1, 0x1d, 0xe9, 0x8d, 0x78, 0xff, 0xdc, 0xc3, 0x3e, 0xe7, 0x34, 0x18, 0xe5, 0xd4, 0x1f, 0xde,
- 0xc1, 0x7d, 0x28, 0xb3, 0xc9, 0x98, 0x18, 0xdf, 0x50, 0x75, 0x68, 0xbb, 0xfc, 0x64, 0xa9, 0x2c,
- 0x59, 0xd9, 0xb1, 0x08, 0xd6, 0xba, 0x81, 0xc2, 0x89, 0xad, 0xd3, 0xc3, 0x5c, 0xbf, 0x27, 0xcb,
- 0x4d, 0xa5, 0x14, 0x5a, 0xb9, 0x67, 0xbb, 0x2c, 0xa1, 0x46, 0x76, 0x14, 0xe0, 0x92, 0x48, 0xa8,
- 0x91, 0x1d, 0x86, 0xf7, 0x16, 0x6c, 0x69, 0x9a, 0x18, 0xb3, 0xa1, 0xa9, 0x41, 0x89, 0xef, 0x49,
- 0x28, 0x11, 0x2c, 0x4d, 0x3b, 0x16, 0x84, 0x20, 0xc7, 0x3d, 0xfc, 0x19, 0xbc, 0x3e, 0x0b, 0x56,
- 0x5c, 0x71, 0x73, 0x61, 0x94, 0xf3, 0xaa, 0xb7, 0x60, 0xcb, 0x39, 0x5f, 0x54, 0xc4, 0x89, 0x1e,
- 0x9d, 0xf3, 0x79, 0xb5, 0x4f, 0x61, 0xdb, 0x19, 0x3b, 0x8b, 0x7a, 0x5b, 0x71, 0x3d, 0xec, 0x8c,
- 0x9d, 0x79, 0xc5, 0xf7, 0xf8, 0x7d, 0xcf, 0xa5, 0x1a, 0xf1, 0xa9, 0x2e, 0x5d, 0x8e, 0xd3, 0x63,
- 0x02, 0xbc, 0x0f, 0x48, 0xd3, 0x54, 0x6a, 0x91, 0x33, 0x93, 0xaa, 0xc4, 0xa5, 0x16, 0xf1, 0xa4,
- 0xab, 0x71, 0x72, 0x45, 0xd3, 0x64, 0x2e, 0xad, 0x73, 0x21, 0xfe, 0x00, 0x36, 0xed, 0xb3, 0x47,
- 0x9a, 0x48, 0x49, 0xd5, 0x71, 0xe9, 0xd0, 0x78, 0x26, 0xbd, 0xcb, 0xe3, 0xbb, 0xc1, 0x04, 0x3c,
- 0x21, 0x7b, 0x1c, 0xc6, 0x37, 0x00, 0x69, 0xde, 0x98, 0xb8, 0x0e, 0xaf, 0x09, 0x3c, 0x87, 0x68,
- 0x54, 0x7a, 0x4f, 0x50, 0x05, 0xde, 0x09, 0x61, 0xb6, 0x24, 0xbc, 0xa7, 0xc6, 0xd0, 0x0f, 0x2d,
- 0x5e, 0x17, 0x4b, 0x82, 0x63, 0x81, 0xb5, 0x3d, 0x40, 0x2c, 0x14, 0x89, 0x8e, 0xf7, 0x38, 0xad,
- 0xe2, 0x8c, 0x9d, 0x78, 0xbf, 0xef, 0xc0, 0x3a, 0x63, 0xce, 0x3a, 0xbd, 0x21, 0xea, 0x19, 0x67,
- 0x1c, 0xeb, 0xf1, 0x01, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x5d, 0x26, 0xc4,
- 0x8e, 0x20, 0xfd, 0x7b, 0x6d, 0xc5, 0x75, 0xe0, 0x34, 0xce, 0x16, 0x89, 0xa8, 0x6c, 0x4d, 0x17,
- 0xc1, 0xea, 0x21, 0x94, 0xe3, 0xf9, 0x89, 0x8b, 0x20, 0x32, 0x14, 0xa5, 0xd8, 0x59, 0xdf, 0xe8,
- 0x36, 0xd9, 0x29, 0xfd, 0x95, 0x8c, 0xd2, 0xac, 0x5a, 0x68, 0xb7, 0x06, 0xb2, 0xaa, 0x9c, 0x76,
- 0x06, 0xad, 0x13, 0x19, 0x65, 0x3e, 0x28, 0x16, 0xfe, 0xb3, 0x86, 0x9e, 0x3f, 0x7f, 0xfe, 0x3c,
- 0x7d, 0x3f, 0x5b, 0x78, 0x1f, 0x5d, 0xaf, 0x7e, 0x9f, 0x86, 0x4a, 0xb2, 0x4e, 0xc7, 0x3f, 0x87,
- 0xcb, 0xe1, 0xa5, 0xda, 0xa3, 0xbe, 0xfa, 0xd4, 0x70, 0xf9, 0xc2, 0x99, 0x10, 0x51, 0xe9, 0x46,
- 0x53, 0xb7, 0x1d, 0xb0, 0xfa, 0xd4, 0xff, 0xd2, 0x70, 0xd9, 0xb2, 0x98, 0x10, 0x1f, 0xb7, 0xe1,
- 0xaa, 0x65, 0xab, 0x9e, 0x4f, 0x2c, 0x9d, 0xb8, 0xba, 0x3a, 0x7b, 0xce, 0x50, 0x89, 0xa6, 0x51,
- 0xcf, 0xb3, 0xc5, 0x81, 0x15, 0x59, 0x79, 0xd3, 0xb2, 0xfb, 0x01, 0x79, 0xb6, 0x93, 0xd7, 0x03,
- 0xea, 0x5c, 0x9a, 0x65, 0x56, 0xa5, 0xd9, 0x1b, 0x50, 0x9c, 0x10, 0x47, 0xa5, 0x96, 0xef, 0x9e,
- 0xf3, 0xea, 0xb2, 0xa0, 0x14, 0x26, 0xc4, 0x91, 0x59, 0xfb, 0xd5, 0xcd, 0x44, 0x32, 0x9a, 0x05,
- 0x54, 0xbc, 0x9f, 0x2d, 0x14, 0x11, 0x54, 0xff, 0x99, 0x81, 0x72, 0xbc, 0xda, 0x64, 0xc5, 0xbb,
- 0xc6, 0x4f, 0x96, 0x14, 0xdf, 0x7b, 0xde, 0x79, 0x61, 0x6d, 0x5a, 0x6b, 0xb0, 0x23, 0xe7, 0x30,
- 0x2f, 0x6a, 0x40, 0x45, 0x68, 0xb2, 0xe3, 0x9e, 0xed, 0x36, 0x54, 0xdc, 0x2c, 0x0a, 0x4a, 0xd0,
- 0xc2, 0xc7, 0x90, 0x7f, 0xe4, 0x71, 0xdb, 0x79, 0x6e, 0xfb, 0xdd, 0x17, 0xdb, 0xbe, 0xdf, 0xe7,
- 0xc6, 0x8b, 0xf7, 0xfb, 0x6a, 0xa7, 0xab, 0x9c, 0xd4, 0xdb, 0x4a, 0xa0, 0x8e, 0xaf, 0x40, 0xd6,
- 0x24, 0xdf, 0x9c, 0x27, 0x0f, 0x27, 0x0e, 0x5d, 0x74, 0x12, 0xae, 0x40, 0xf6, 0x29, 0x25, 0x8f,
- 0x93, 0x47, 0x02, 0x87, 0x5e, 0xe1, 0x62, 0xd8, 0x87, 0x1c, 0x8f, 0x17, 0x06, 0x08, 0x22, 0x86,
- 0x5e, 0xc3, 0x05, 0xc8, 0x36, 0xba, 0x0a, 0x5b, 0x10, 0x08, 0xca, 0x02, 0x55, 0x7b, 0x2d, 0xb9,
- 0x21, 0xa3, 0x74, 0xf5, 0x16, 0xe4, 0x45, 0x10, 0xd8, 0x62, 0x89, 0xc2, 0x80, 0x5e, 0x0b, 0x9a,
- 0x81, 0x8d, 0x54, 0x28, 0x3d, 0x3d, 0x39, 0x92, 0x15, 0x94, 0x4e, 0x4e, 0x75, 0x16, 0xe5, 0xaa,
- 0x1e, 0x94, 0xe3, 0xe5, 0xe6, 0x8f, 0x92, 0x65, 0xd5, 0xbf, 0xa5, 0xa0, 0x14, 0x2b, 0x1f, 0x59,
- 0xe1, 0x42, 0x4c, 0xd3, 0x7e, 0xaa, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43,
- 0x2e, 0x3a, 0x75, 0x3f, 0xd2, 0x12, 0xc9, 0xa1, 0x7c, 0xf5, 0x4f, 0x29, 0x40, 0xf3, 0x05, 0xe8,
- 0x9c, 0x9b, 0xa9, 0x9f, 0xd2, 0xcd, 0xea, 0x1f, 0x53, 0x50, 0x49, 0x56, 0x9d, 0x73, 0xee, 0x5d,
- 0xfb, 0x49, 0xdd, 0xfb, 0x47, 0x1a, 0xd6, 0x13, 0xb5, 0xe6, 0x45, 0xbd, 0xfb, 0x1a, 0x36, 0x0d,
- 0x9d, 0x4e, 0x1c, 0xdb, 0xa7, 0x96, 0x76, 0xae, 0x9a, 0xf4, 0x09, 0x35, 0xa5, 0x2a, 0xdf, 0x34,
- 0xf6, 0x5f, 0x5c, 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0x93,
- 0x5e, 0x77, 0x20, 0x77, 0x1a, 0x0f, 0xd5, 0xd3, 0xce, 0x2f, 0x3b, 0xdd, 0x2f, 0x3b, 0x0a, 0x32,
- 0xe6, 0x68, 0xaf, 0x70, 0xd9, 0xf7, 0x00, 0xcd, 0x3b, 0x85, 0x2f, 0xc3, 0x32, 0xb7, 0xd0, 0x6b,
- 0x78, 0x0b, 0x36, 0x3a, 0x5d, 0xb5, 0xdf, 0x6a, 0xca, 0xaa, 0x7c, 0xef, 0x9e, 0xdc, 0x18, 0xf4,
- 0xc5, 0xf5, 0x3e, 0x62, 0x0f, 0x12, 0x0b, 0xbc, 0xfa, 0x87, 0x0c, 0x6c, 0x2d, 0xf1, 0x04, 0xd7,
- 0x83, 0x9b, 0x85, 0xb8, 0xec, 0x7c, 0x74, 0x11, 0xef, 0x6b, 0xac, 0x20, 0xe8, 0x11, 0xd7, 0x0f,
- 0x2e, 0x22, 0x37, 0x80, 0x45, 0xc9, 0xf2, 0x8d, 0xa1, 0x41, 0xdd, 0xe0, 0x35, 0x44, 0x5c, 0x37,
- 0x36, 0x66, 0xb8, 0x78, 0x10, 0xf9, 0x19, 0x60, 0xc7, 0xf6, 0x0c, 0xdf, 0x78, 0x42, 0x55, 0xc3,
- 0x0a, 0x9f, 0x4e, 0xd8, 0xf5, 0x23, 0xab, 0xa0, 0x50, 0xd2, 0xb2, 0xfc, 0x88, 0x6d, 0xd1, 0x11,
- 0x99, 0x63, 0xb3, 0xcd, 0x3c, 0xa3, 0xa0, 0x50, 0x12, 0xb1, 0xaf, 0x41, 0x59, 0xb7, 0xa7, 0xac,
- 0x26, 0x13, 0x3c, 0x76, 0x76, 0xa4, 0x94, 0x92, 0xc0, 0x22, 0x4a, 0x50, 0x6d, 0xcf, 0xde, 0x6c,
- 0xca, 0x4a, 0x49, 0x60, 0x82, 0x72, 0x1d, 0x36, 0xc8, 0x68, 0xe4, 0x32, 0xe3, 0xa1, 0x21, 0x71,
- 0x7f, 0xa8, 0x44, 0x30, 0x27, 0xee, 0xdc, 0x87, 0x42, 0x18, 0x07, 0x76, 0x54, 0xb3, 0x48, 0xa8,
- 0x8e, 0x78, 0x39, 0x4b, 0xef, 0x15, 0x95, 0x82, 0x15, 0x0a, 0xaf, 0x41, 0xd9, 0xf0, 0xd4, 0xd9,
- 0x13, 0x6e, 0x7a, 0x37, 0xbd, 0x57, 0x50, 0x4a, 0x86, 0x17, 0xbd, 0xd9, 0x55, 0xbf, 0x4b, 0x43,
- 0x25, 0xf9, 0x04, 0x8d, 0x9b, 0x50, 0x30, 0x6d, 0x8d, 0xf0, 0xd4, 0x12, 0xdf, 0x3f, 0xf6, 0x5e,
- 0xf2, 0x6a, 0x5d, 0x6b, 0x07, 0x7c, 0x25, 0xd2, 0xdc, 0xf9, 0x7b, 0x0a, 0x0a, 0x21, 0x8c, 0x2f,
- 0x41, 0xd6, 0x21, 0xfe, 0x98, 0x9b, 0xcb, 0x1d, 0xa5, 0x51, 0x4a, 0xe1, 0x6d, 0x86, 0x7b, 0x0e,
- 0xb1, 0x78, 0x0a, 0x04, 0x38, 0x6b, 0xb3, 0x79, 0x35, 0x29, 0xd1, 0xf9, 0xe5, 0xc4, 0x9e, 0x4c,
- 0xa8, 0xe5, 0x7b, 0xe1, 0xbc, 0x06, 0x78, 0x23, 0x80, 0xf1, 0x87, 0xb0, 0xe9, 0xbb, 0xc4, 0x30,
- 0x13, 0xdc, 0x2c, 0xe7, 0xa2, 0x50, 0x10, 0x91, 0x0f, 0xe1, 0x4a, 0x68, 0x57, 0xa7, 0x3e, 0xd1,
- 0xc6, 0x54, 0x9f, 0x29, 0xe5, 0xf9, 0xfb, 0xe6, 0xe5, 0x80, 0xd0, 0x0c, 0xe4, 0xa1, 0x6e, 0xf5,
- 0xfb, 0x14, 0x6c, 0x86, 0xd7, 0x29, 0x3d, 0x0a, 0xd6, 0x09, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87,
- 0x6b, 0x31, 0x95, 0x17, 0xf4, 0x6a, 0xf5, 0x48, 0x49, 0x89, 0x19, 0xd8, 0x99, 0x00, 0xcc, 0x24,
- 0x2b, 0xc3, 0x76, 0x15, 0x4a, 0xc1, 0xf7, 0x05, 0xfe, 0x91, 0x4a, 0x5c, 0xc0, 0x41, 0x40, 0xec,
- 0xde, 0x85, 0xb7, 0x21, 0x77, 0x46, 0x47, 0x86, 0x15, 0xbc, 0x7a, 0x8a, 0x46, 0xf8, 0x96, 0x9a,
- 0x8d, 0xde, 0x52, 0x8f, 0x7e, 0x97, 0x82, 0x2d, 0xcd, 0x9e, 0xcc, 0xfb, 0x7b, 0x84, 0xe6, 0x5e,
- 0x01, 0xbc, 0x2f, 0x52, 0x5f, 0xdd, 0x1d, 0x19, 0xfe, 0x78, 0x7a, 0x56, 0xd3, 0xec, 0xc9, 0xfe,
- 0xc8, 0x36, 0x89, 0x35, 0x9a, 0x7d, 0x65, 0xe3, 0x7f, 0xb4, 0x8f, 0x46, 0xd4, 0xfa, 0x68, 0x64,
- 0xc7, 0xbe, 0xb9, 0x7d, 0x3e, 0xfb, 0xfb, 0x6d, 0x3a, 0x73, 0xdc, 0x3b, 0xfa, 0x73, 0x7a, 0xe7,
- 0x58, 0xf4, 0xd5, 0x0b, 0x63, 0xa3, 0xd0, 0xa1, 0x49, 0x35, 0x36, 0xde, 0xff, 0x05, 0x00, 0x00,
- 0xff, 0xff, 0xa2, 0xc3, 0x4e, 0x18, 0xbe, 0x1b, 0x00, 0x00,
+ 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37,
+ 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f,
+ 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e,
+ 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0,
+ 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4,
+ 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80,
+ 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f,
+ 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96,
+ 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1,
+ 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa,
+ 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc,
+ 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59,
+ 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96,
+ 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50,
+ 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27,
+ 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58,
+ 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a,
+ 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67,
+ 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e,
+ 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27,
+ 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f,
+ 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0,
+ 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2,
+ 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4,
+ 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4,
+ 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c,
+ 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43,
+ 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76,
+ 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b,
+ 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28,
+ 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e,
+ 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55,
+ 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2,
+ 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd,
+ 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59,
+ 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27,
+ 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64,
+ 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d,
+ 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18,
+ 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1,
+ 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5,
+ 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24,
+ 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8,
+ 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94,
+ 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91,
+ 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72,
+ 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c,
+ 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75,
+ 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4,
+ 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee,
+ 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e,
+ 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f,
+ 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20,
+ 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e,
+ 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8,
+ 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb,
+ 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16,
+ 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc,
+ 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1,
+ 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a,
+ 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd,
+ 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0,
+ 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25,
+ 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12,
+ 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e,
+ 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4,
+ 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09,
+ 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd,
+ 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43,
+ 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14,
+ 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7,
+ 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59,
+ 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e,
+ 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8,
+ 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a,
+ 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed,
+ 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f,
+ 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4,
+ 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f,
+ 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24,
+ 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66,
+ 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57,
+ 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c,
+ 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00,
}
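
The generated code above adds `ExtensionRangeOptions` and a matching `GetOptions` accessor on `DescriptorProto_ExtensionRange`. A minimal sketch of reading the new field, assuming the vendored import path `github.com/golang/protobuf/protoc-gen-go/descriptor`; the `Widget` message built here is purely illustrative and not taken from the patch:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Build a DescriptorProto by hand to exercise the new field; real code
	// would usually obtain one from a CodeGeneratorRequest or descriptor set.
	msg := &descriptor.DescriptorProto{
		Name: proto.String("Widget"),
		ExtensionRange: []*descriptor.DescriptorProto_ExtensionRange{
			{
				Start:   proto.Int32(100),
				End:     proto.Int32(200),
				Options: &descriptor.ExtensionRangeOptions{},
			},
		},
	}

	for _, er := range msg.GetExtensionRange() {
		// GetOptions is the accessor added in this update; it returns nil for
		// ranges declared without options.
		fmt.Printf("range [%d, %d) has options: %v\n",
			er.GetStart(), er.GetEnd(), er.GetOptions() != nil)
	}
}
```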
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
index 70b82a4dc..4d4fb378f 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -101,6 +101,8 @@ message DescriptorProto {
message ExtensionRange {
optional int32 start = 1;
optional int32 end = 2;
+
+ optional ExtensionRangeOptions options = 3;
}
repeated ExtensionRange extension_range = 5;
@@ -121,6 +123,14 @@ message DescriptorProto {
repeated string reserved_name = 10;
}
+message ExtensionRangeOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
// Describes a field within a message.
message FieldDescriptorProto {
enum Type {
@@ -351,7 +361,7 @@ message FileOptions {
optional bool cc_generic_services = 16 [default=false];
optional bool java_generic_services = 17 [default=false];
optional bool py_generic_services = 18 [default=false];
- optional bool php_generic_services = 19 [default=false];
+ optional bool php_generic_services = 42 [default=false];
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
@@ -483,13 +493,15 @@ message FieldOptions {
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
- // represented as JavaScript strings. This avoids loss of precision that can
- // happen when a large value is converted to a floating point JavaScript
- // numbers. Specifying JS_NUMBER for the jstype causes the generated
- // JavaScript code to use the JavaScript "number" type instead of strings.
- // This option is an enum to permit additional types to be added,
- // e.g. goog.math.Integer.
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
optional JSType jstype = 6 [default = JS_NORMAL];
enum JSType {
// Use the default type.
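
The rewritten `jstype` comment hinges on JavaScript having only IEEE-754 double-precision numbers. A small Go illustration of the precision loss it describes, using `float64` as a stand-in for a JavaScript number:

```go
package main

import "fmt"

func main() {
	// Integers above 2^53 cannot be represented exactly as a double, which is
	// the precision loss that motivates JS_STRING for 64-bit fields.
	const big int64 = 9007199254740993 // 2^53 + 1
	asDouble := float64(big)

	fmt.Println(big)             // 9007199254740993
	fmt.Println(int64(asDouble)) // 9007199254740992 (the +1 is lost)
}
```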
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
index 211ab5d3a..60d524645 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
@@ -1984,7 +1984,7 @@ func (g *Generator) generateMessage(message *Descriptor) {
case typename == "string":
def = strconv.Quote(def)
case typename == "[]byte":
- def = "[]byte(" + strconv.Quote(def) + ")"
+ def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
kind = "var "
case def == "inf", def == "-inf", def == "nan":
// These names are known to, and defined by, the protocol language.
@@ -2508,6 +2508,67 @@ func (g *Generator) generateMessage(message *Descriptor) {
g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName)
}
+var escapeChars = [256]byte{
+ 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
+func unescape(s string) string {
+ // NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+ // single and double quotes, but strconv.Unquote only allows one or the
+ // other (based on actual surrounding quotes of its input argument).
+
+ var out []byte
+ for len(s) > 0 {
+ // regular character, or too short to be valid escape
+ if s[0] != '\\' || len(s) < 2 {
+ out = append(out, s[0])
+ s = s[1:]
+ } else if c := escapeChars[s[1]]; c != 0 {
+ // escape sequence
+ out = append(out, c)
+ s = s[2:]
+ } else if s[1] == 'x' || s[1] == 'X' {
+ // hex escape, e.g. "\x80"
+ if len(s) < 4 {
+ // too short to be valid
+ out = append(out, s[:2]...)
+ s = s[2:]
+ continue
+ }
+ v, err := strconv.ParseUint(s[2:4], 16, 8)
+ if err != nil {
+ out = append(out, s[:4]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[4:]
+ } else if '0' <= s[1] && s[1] <= '7' {
+ // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+ // so consume up to 2 more bytes or up to end-of-string
+ n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+ if err != nil {
+ out = append(out, s[:1+n]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[1+n:]
+ } else {
+ // bad escape, just propagate the slash as-is
+ out = append(out, s[0])
+ s = s[1:]
+ }
+ }
+
+ return string(out)
+}
+
func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
ccTypeName := ext.DescName()
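
The new `unescape` helper explains in its comment why `strconv.Unquote` cannot be used here: protoc escapes both quote characters in a default value, while `strconv.Unquote` accepts only the escapes valid for the quoting style of its input. A short, self-contained demonstration of that limitation; the sample string is made up:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A protoc-escaped default may contain both \' and \" at once.
	// Inside a double-quoted literal, \' is not a legal escape.
	_, err := strconv.Unquote(`"a\'b\"c"`)
	fmt.Println(err) // invalid syntax

	// Inside single quotes, Unquote treats the input as a rune literal,
	// which must decode to exactly one character.
	_, err = strconv.Unquote(`'a\'b\"c'`)
	fmt.Println(err) // invalid syntax
}
```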
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
index a5ebc8533..76808f3b7 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
@@ -83,3 +83,32 @@ func TestGoPackageOption(t *testing.T) {
}
}
}
+
+func TestUnescape(t *testing.T) {
+ tests := []struct {
+ in string
+ out string
+ }{
+ // successful cases, including all kinds of escapes
+ {"", ""},
+ {"foo bar baz frob nitz", "foo bar baz frob nitz"},
+ {`\000\001\002\003\004\005\006\007`, string([]byte{0, 1, 2, 3, 4, 5, 6, 7})},
+ {`\a\b\f\n\r\t\v\\\?\'\"`, string([]byte{'\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\', '?', '\'', '"'})},
+ {`\x10\x20\x30\x40\x50\x60\x70\x80`, string([]byte{16, 32, 48, 64, 80, 96, 112, 128})},
+ // variable length octal escapes
+ {`\0\018\222\377\3\04\005\6\07`, string([]byte{0, 1, '8', 0222, 255, 3, 4, 5, 6, 7})},
+ // malformed escape sequences left as is
+ {"foo \\g bar", "foo \\g bar"},
+ {"foo \\xg0 bar", "foo \\xg0 bar"},
+ {"\\", "\\"},
+ {"\\x", "\\x"},
+ {"\\xf", "\\xf"},
+ {"\\777", "\\777"}, // overflows byte
+ }
+ for _, tc := range tests {
+ s := unescape(tc.in)
+ if s != tc.out {
+ t.Errorf("doUnescape(%q) = %q; should have been %q", tc.in, s, tc.out)
+ }
+ }
+}
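
The `\777` case in the table above is kept verbatim because `unescape` parses octal escapes with `strconv.ParseUint(..., 8, 8)`, and a bit size of 8 rejects anything above 255. A quick illustration of that behavior:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "\40" decodes to a space (0o40 == 32), well within a byte.
	v, err := strconv.ParseUint("40", 8, 8)
	fmt.Println(v, err) // 32 <nil>

	// 0o777 == 511 overflows a byte, so unescape leaves the escape untouched.
	v, err = strconv.ParseUint("777", 8, 8)
	fmt.Println(v, err) // 255 strconv.ParseUint: parsing "777": value out of range
}
```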
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
index f04dc73c8..5b5574529 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -91,6 +91,7 @@ message CodeGeneratorRequest {
// The version number of protocol compiler.
optional Version compiler_version = 3;
+
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 6c9a6cf74..f34601723 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -62,6 +62,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo)
// ...
//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
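
The new Example 4 in the `Any` documentation uses a placeholder `pb.Foo` message. A complete, runnable variant of the same round trip, assuming a well-known `Duration` message as the payload instead of `pb.Foo`:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message into an Any...
	orig := ptypes.DurationProto(90 * time.Second)
	packed, err := ptypes.MarshalAny(orig)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// ...and unpack it again into a freshly allocated message.
	unpacked := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(packed, unpacked); err != nil {
		log.Fatal(err)
	}
	fmt.Println(unpacked.Seconds) // 90
}
```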
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
index 9bd3f50a4..c74866762 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -74,6 +74,16 @@ option objc_class_prefix = "GPB";
// any.Unpack(foo)
// ...
//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index 56c67134f..8dcd71887 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -15,7 +15,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
-* URL hosts and paths can have variables with an optional regular expression.
+* URL hosts, paths and query values can have variables with an optional regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
@@ -24,9 +24,9 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
-* [Listing Routes](#listing-routes)
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
+* [Walking Routes](#walking-routes)
* [Full Example](#full-example)
---
@@ -168,7 +168,6 @@ s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
-
### Listing Routes
Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
@@ -191,9 +190,9 @@ func handler(w http.ResponseWriter, r *http.Request) {
func main() {
r := mux.NewRouter()
r.HandleFunc("/", handler)
- r.Methods("POST").HandleFunc("/products", handler)
- r.Methods("GET").HandleFunc("/articles", handler)
- r.Methods("GET", "PUT").HandleFunc("/articles/{id}", handler)
+ r.HandleFunc("/products", handler).Methods("POST")
+ r.HandleFunc("/articles", handler).Methods("GET")
+ r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
t, err := route.GetPathTemplate()
if err != nil {
@@ -269,19 +268,21 @@ url, err := r.Get("article").URL("category", "technology", "id", "42")
"/articles/technology/42"
```
-This also works for host variables:
+This also works for host and query value variables:
```go
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
-// url.String() will be "http://news.domain.com/articles/technology/42"
+// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
- "id", "42")
+ "id", "42",
+ "filter", "gorilla")
```
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
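
The host and query value snippet above can be turned into a self-contained program as follows; the route and values match the README example:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func ArticleHandler(w http.ResponseWriter, r *http.Request) {}

func main() {
	r := mux.NewRouter()
	r.Host("{subdomain}.domain.com").
		Path("/articles/{category}/{id:[0-9]+}").
		Queries("filter", "{filter}").
		HandlerFunc(ArticleHandler).
		Name("article")

	// Build the URL by supplying every variable defined in the route.
	url, err := r.Get("article").URL(
		"subdomain", "news",
		"category", "technology",
		"id", "42",
		"filter", "gorilla")
	if err != nil {
		panic(err)
	}
	fmt.Println(url.String()) // http://news.domain.com/articles/technology/42?filter=gorilla
}
```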
@@ -319,6 +320,37 @@ url, err := r.Get("article").URL("subdomain", "news",
"id", "42")
```
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.HandleFunc("/products", handler).Methods("POST")
+r.HandleFunc("/articles", handler).Methods("GET")
+r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ t, err := route.GetPathTemplate()
+ if err != nil {
+ return err
+ }
+ // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages.
+ // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P<v0>[^/]+)$'.
+ p, err := route.GetPathRegexp()
+ if err != nil {
+ return err
+ }
+ m, err := route.GetMethods()
+ if err != nil {
+ return err
+ }
+ fmt.Println(strings.Join(m, ","), t, p)
+ return nil
+})
+```
+
## Full Example
Here's a complete, runnable example of a small `mux` based server:
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
index 00daf4a72..cce30b2f0 100644
--- a/vendor/github.com/gorilla/mux/doc.go
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -12,8 +12,8 @@ or other conditions. The main features are:
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
- * URL hosts and paths can have variables with an optional regular
- expression.
+ * URL hosts, paths and query values can have variables with an optional
+ regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
@@ -188,18 +188,20 @@ key/value pairs for the route variables. For the previous route, we would do:
"/articles/technology/42"
-This also works for host variables:
+This also works for host and query value variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
- // url.String() will be "http://news.domain.com/articles/technology/42"
+ // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
- "id", "42")
+ "id", "42",
+ "filter", "gorilla")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
index d66ec3841..fb69196db 100644
--- a/vendor/github.com/gorilla/mux/mux.go
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -13,6 +13,10 @@ import (
"strings"
)
+var (
+ ErrMethodMismatch = errors.New("method is not allowed")
+)
+
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
@@ -39,6 +43,10 @@ func NewRouter() *Router {
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
+
+ // Configurable Handler to be used when the request method does not match the route.
+ MethodNotAllowedHandler http.Handler
+
// Parent route, if this is a subrouter.
parent parentRoute
// Routes to be matched, in order.
@@ -65,6 +73,11 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
}
}
+ if match.MatchErr == ErrMethodMismatch && r.MethodNotAllowedHandler != nil {
+ match.Handler = r.MethodNotAllowedHandler
+ return true
+ }
+
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
@@ -105,9 +118,15 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
req = setVars(req, match.Vars)
req = setCurrentRoute(req, match.Route)
}
+
+ if handler == nil && match.MatchErr == ErrMethodMismatch {
+ handler = methodNotAllowedHandler()
+ }
+
if handler == nil {
handler = http.NotFoundHandler()
}
+
if !r.KeepContext {
defer contextClear(req)
}
@@ -176,6 +195,13 @@ func (r *Router) UseEncodedPath() *Router {
// parentRoute
// ----------------------------------------------------------------------------
+func (r *Router) getBuildScheme() string {
+ if r.parent != nil {
+ return r.parent.getBuildScheme()
+ }
+ return ""
+}
+
// getNamedRoutes returns the map where named routes are registered.
func (r *Router) getNamedRoutes() map[string]*Route {
if r.namedRoutes == nil {
@@ -299,10 +325,6 @@ type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
for _, t := range r.routes {
- if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
- continue
- }
-
err := walkFn(t, r, ancestors)
if err == SkipRouter {
continue
@@ -312,10 +334,12 @@ func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
}
for _, sr := range t.matchers {
if h, ok := sr.(*Router); ok {
+ ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
+ ancestors = ancestors[:len(ancestors)-1]
}
}
if h, ok := t.handler.(*Router); ok {
@@ -339,6 +363,11 @@ type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
+
+ // MatchErr is set to appropriate matching error
+ // It is set to ErrMethodMismatch if there is a mismatch in
+ // the request method and route method
+ MatchErr error
}
type contextKey int
@@ -458,7 +487,7 @@ func mapFromPairsToString(pairs ...string) (map[string]string, error) {
return m, nil
}
-// mapFromPairsToRegex converts variadic string paramers to a
+// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
length, err := checkPairs(pairs...)
@@ -540,3 +569,12 @@ func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]s
}
return true
}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
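Taken together, these hunks mean that a request whose path matches a route but whose method does not now produces `ErrMethodMismatch` and a 405 response instead of a 404. A caller can also supply its own handler through the new `MethodNotAllowedHandler` field; a hedged sketch of that usage (`listArticles` and the response body are placeholders):

```go
r := mux.NewRouter()
r.HandleFunc("/articles", listArticles).Methods("GET")

// Optional: replace the default 405 response added in this patch.
r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
    http.Error(w, "only GET is supported on this resource", http.StatusMethodNotAllowed)
})
```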
diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go
index 19ef5a8cc..484fab431 100644
--- a/vendor/github.com/gorilla/mux/mux_test.go
+++ b/vendor/github.com/gorilla/mux/mux_test.go
@@ -11,6 +11,7 @@ import (
"fmt"
"net/http"
"net/url"
+ "reflect"
"strings"
"testing"
)
@@ -35,6 +36,7 @@ type routeTest struct {
scheme string // the expected scheme of the built URL
host string // the expected host of the built URL
path string // the expected path of the built URL
+ query string // the expected query string of the built URL
pathTemplate string // the expected path template of the route
hostTemplate string // the expected host template of the route
methods []string // the expected route methods
@@ -743,6 +745,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{},
host: "",
path: "",
+ query: "foo=bar&baz=ding",
shouldMatch: true,
},
{
@@ -752,6 +755,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{},
host: "",
path: "",
+ query: "foo=bar&baz=ding",
pathTemplate: `/api`,
hostTemplate: `www.example.com`,
shouldMatch: true,
@@ -763,6 +767,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{},
host: "",
path: "",
+ query: "foo=bar&baz=ding",
pathTemplate: `/api`,
hostTemplate: `www.example.com`,
shouldMatch: true,
@@ -783,6 +788,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v1": "bar"},
host: "",
path: "",
+ query: "foo=bar",
shouldMatch: true,
},
{
@@ -792,6 +798,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v1": "bar", "v2": "ding"},
host: "",
path: "",
+ query: "foo=bar&baz=ding",
shouldMatch: true,
},
{
@@ -801,6 +808,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v1": "10"},
host: "",
path: "",
+ query: "foo=10",
shouldMatch: true,
},
{
@@ -819,6 +827,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v1": "1"},
host: "",
path: "",
+ query: "foo=1",
shouldMatch: true,
},
{
@@ -828,6 +837,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v1": "1"},
host: "",
path: "",
+ query: "foo=1",
shouldMatch: true,
},
{
@@ -846,6 +856,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v1": "1a"},
host: "",
path: "",
+ query: "foo=1a",
shouldMatch: true,
},
{
@@ -864,6 +875,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v-1": "bar"},
host: "",
path: "",
+ query: "foo=bar",
shouldMatch: true,
},
{
@@ -873,6 +885,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v-1": "bar", "v-2": "ding"},
host: "",
path: "",
+ query: "foo=bar&baz=ding",
shouldMatch: true,
},
{
@@ -882,6 +895,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v-1": "10"},
host: "",
path: "",
+ query: "foo=10",
shouldMatch: true,
},
{
@@ -891,6 +905,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"v-1": "1a"},
host: "",
path: "",
+ query: "foo=1a",
shouldMatch: true,
},
{
@@ -900,6 +915,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{},
host: "",
path: "",
+ query: "foo=",
shouldMatch: true,
},
{
@@ -918,6 +934,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{},
host: "",
path: "",
+ query: "foo=",
shouldMatch: true,
},
{
@@ -945,6 +962,7 @@ func TestQueries(t *testing.T) {
vars: map[string]string{"foo": ""},
host: "",
path: "",
+ query: "foo=",
shouldMatch: true,
},
{
@@ -956,6 +974,16 @@ func TestQueries(t *testing.T) {
path: "",
shouldMatch: false,
},
+ {
+ title: "Queries route with pattern, match, escaped value",
+ route: new(Route).Queries("foo", "{v1}"),
+ request: newRequest("GET", "http://localhost?foo=%25bar%26%20%2F%3D%3F"),
+ vars: map[string]string{"v1": "%bar& /=?"},
+ host: "",
+ path: "",
+ query: "foo=%25bar%26+%2F%3D%3F",
+ shouldMatch: true,
+ },
}
for _, test := range tests {
@@ -1187,6 +1215,28 @@ func TestSubRouter(t *testing.T) {
pathTemplate: `/{category}`,
shouldMatch: true,
},
+ {
+ title: "Build with scheme on parent router",
+ route: new(Route).Schemes("ftp").Host("google.com").Subrouter().Path("/"),
+ request: newRequest("GET", "ftp://google.com/"),
+ scheme: "ftp",
+ host: "google.com",
+ path: "/",
+ pathTemplate: `/`,
+ hostTemplate: `google.com`,
+ shouldMatch: true,
+ },
+ {
+ title: "Prefer scheme on child route when building URLs",
+ route: new(Route).Schemes("https", "ftp").Host("google.com").Subrouter().Schemes("ftp").Path("/"),
+ request: newRequest("GET", "ftp://google.com/"),
+ scheme: "ftp",
+ host: "google.com",
+ path: "/",
+ pathTemplate: `/`,
+ hostTemplate: `google.com`,
+ shouldMatch: true,
+ },
}
for _, test := range tests {
@@ -1382,14 +1432,58 @@ func TestWalkNested(t *testing.T) {
l2 := l1.PathPrefix("/l").Subrouter()
l2.Path("/a")
- paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"}
+ testCases := []struct {
+ path string
+ ancestors []*Route
+ }{
+ {"/g", []*Route{}},
+ {"/g/o", []*Route{g.parent.(*Route)}},
+ {"/g/o/r", []*Route{g.parent.(*Route), o.parent.(*Route)}},
+ {"/g/o/r/i", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route)}},
+ {"/g/o/r/i/l", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route)}},
+ {"/g/o/r/i/l/l", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route), l1.parent.(*Route)}},
+ {"/g/o/r/i/l/l/a", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route), l1.parent.(*Route), l2.parent.(*Route)}},
+ }
+
idx := 0
err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error {
- path := paths[idx]
+ path := testCases[idx].path
tpl := route.regexp.path.template
if tpl != path {
t.Errorf(`Expected %s got %s`, path, tpl)
}
+ currWantAncestors := testCases[idx].ancestors
+ if !reflect.DeepEqual(currWantAncestors, ancestors) {
+ t.Errorf(`Expected %+v got %+v`, currWantAncestors, ancestors)
+ }
+ idx++
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ if idx != len(testCases) {
+ t.Errorf("Expected %d routes, found %d", len(testCases), idx)
+ }
+}
+
+func TestWalkSubrouters(t *testing.T) {
+ router := NewRouter()
+
+ g := router.Path("/g").Subrouter()
+ o := g.PathPrefix("/o").Subrouter()
+ o.Methods("GET")
+ o.Methods("PUT")
+
+ // all 4 routes should be matched, but final 2 routes do not have path templates
+ paths := []string{"/g", "/g/o", "", ""}
+ idx := 0
+ err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error {
+ path := paths[idx]
+ tpl, _ := route.GetPathTemplate()
+ if tpl != path {
+ t.Errorf(`Expected %s got %s`, path, tpl)
+ }
idx++
return nil
})
@@ -1492,6 +1586,7 @@ func testRoute(t *testing.T, test routeTest) {
route := test.route
vars := test.vars
shouldMatch := test.shouldMatch
+ query := test.query
shouldRedirect := test.shouldRedirect
uri := url.URL{
Scheme: test.scheme,
@@ -1561,6 +1656,13 @@ func testRoute(t *testing.T, test routeTest) {
return
}
}
+ if query != "" {
+ u, _ := route.URL(mapToPairs(match.Vars)...)
+ if query != u.RawQuery {
+ t.Errorf("(%v) URL query not equal: expected %v, got %v", test.title, query, u.RawQuery)
+ return
+ }
+ }
if shouldRedirect && match.Handler == nil {
t.Errorf("(%v) Did not redirect", test.title)
return
@@ -1769,3 +1871,42 @@ func newRequest(method, url string) *http.Request {
}
return req
}
+
+func TestNoMatchMethodErrorHandler(t *testing.T) {
+ func1 := func(w http.ResponseWriter, r *http.Request) {}
+
+ r := NewRouter()
+ r.HandleFunc("/", func1).Methods("GET", "POST")
+
+ req, _ := http.NewRequest("PUT", "http://localhost/", nil)
+ match := new(RouteMatch)
+ matched := r.Match(req, match)
+
+ if matched {
+ t.Error("Should not have matched route for methods")
+ }
+
+ if match.MatchErr != ErrMethodMismatch {
+ t.Error("Should get ErrMethodMismatch error")
+ }
+
+ resp := NewRecorder()
+ r.ServeHTTP(resp, req)
+ if resp.Code != 405 {
+ t.Errorf("Expecting code %v", 405)
+ }
+
+ // Add matching route
+ r.HandleFunc("/", func1).Methods("PUT")
+
+ match = new(RouteMatch)
+ matched = r.Match(req, match)
+
+ if !matched {
+ t.Error("Should have matched route for methods")
+ }
+
+ if match.MatchErr != nil {
+ t.Error("Should not have any matching error. Found:", match.MatchErr)
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go
index 9bdc5e5d1..3751e4727 100644
--- a/vendor/github.com/gorilla/mux/old_test.go
+++ b/vendor/github.com/gorilla/mux/old_test.go
@@ -121,12 +121,7 @@ func TestRouteMatchers(t *testing.T) {
var routeMatch RouteMatch
matched := router.Match(request, &routeMatch)
if matched != shouldMatch {
- // Need better messages. :)
- if matched {
- t.Errorf("Should match.")
- } else {
- t.Errorf("Should not match.")
- }
+ t.Errorf("Expected: %v\nGot: %v\nRequest: %v %v", shouldMatch, matched, request.Method, url)
}
if matched {
@@ -188,7 +183,6 @@ func TestRouteMatchers(t *testing.T) {
match(true)
// 2nd route --------------------------------------------------------------
-
// Everything match.
reset2()
match(true)
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index 0189ad346..80d1f7858 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -35,7 +35,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
// Now let's parse it.
defaultPattern := "[^/]+"
if matchQuery {
- defaultPattern = "[^?&]*"
+ defaultPattern = ".*"
} else if matchHost {
defaultPattern = "[^.]+"
matchPrefix = false
@@ -178,6 +178,9 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
+ if r.matchQuery {
+ value = url.QueryEscape(value)
+ }
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
index 56dcbbdc5..6863adba5 100644
--- a/vendor/github.com/gorilla/mux/route.go
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -52,12 +52,27 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
+
+ var matchErr error
+
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
+ if _, ok := m.(methodMatcher); ok {
+ matchErr = ErrMethodMismatch
+ continue
+ }
+ matchErr = nil
return false
}
}
+
+ if matchErr != nil {
+ match.MatchErr = matchErr
+ return false
+ }
+
+ match.MatchErr = nil
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
@@ -68,6 +83,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if match.Vars == nil {
match.Vars = make(map[string]string)
}
+
// Set variables.
if r.regexp != nil {
r.regexp.setMatch(req, match, r)
@@ -482,13 +498,14 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
return nil, err
}
var scheme, host, path string
+ queries := make([]string, 0, len(r.regexp.queries))
if r.regexp.host != nil {
if host, err = r.regexp.host.url(values); err != nil {
return nil, err
}
scheme = "http"
- if r.buildScheme != "" {
- scheme = r.buildScheme
+ if s := r.getBuildScheme(); s != "" {
+ scheme = s
}
}
if r.regexp.path != nil {
@@ -496,10 +513,18 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
return nil, err
}
}
+ for _, q := range r.regexp.queries {
+ var query string
+ if query, err = q.url(values); err != nil {
+ return nil, err
+ }
+ queries = append(queries, query)
+ }
return &url.URL{
- Scheme: scheme,
- Host: host,
- Path: path,
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ RawQuery: strings.Join(queries, "&"),
}, nil
}
@@ -525,8 +550,8 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
Scheme: "http",
Host: host,
}
- if r.buildScheme != "" {
- u.Scheme = r.buildScheme
+ if s := r.getBuildScheme(); s != "" {
+ u.Scheme = s
}
return u, nil
}
@@ -640,11 +665,22 @@ func (r *Route) buildVars(m map[string]string) map[string]string {
// parentRoute allows routes to know about parent host and path definitions.
type parentRoute interface {
+ getBuildScheme() string
getNamedRoutes() map[string]*Route
getRegexpGroup() *routeRegexpGroup
buildVars(map[string]string) map[string]string
}
+func (r *Route) getBuildScheme() string {
+ if r.buildScheme != "" {
+ return r.buildScheme
+ }
+ if r.parent != nil {
+ return r.parent.getBuildScheme()
+ }
+ return ""
+}
+
// getNamedRoutes returns the map where named routes are registered.
func (r *Route) getNamedRoutes() map[string]*Route {
if r.parent == nil {
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index 6e75ece8e..b88f322a8 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -137,7 +137,7 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
- if n.Token.Type == token.FLOAT {
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
v, err := strconv.ParseFloat(n.Token.Text, 64)
if err != nil {
return err
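The widened check above lets integer literals (`token.NUMBER`) decode into float fields, which the new `b = 2` fixture exercises. From the caller's side that behaves roughly like this (a sketch; the struct and field names are illustrative):

```go
package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/hcl"
)

type Config struct {
    A float64 `hcl:"a"`
    B float64 `hcl:"b"`
}

func main() {
    var c Config
    // "b = 2" is an integer literal, but it now decodes into a float field.
    if err := hcl.Decode(&c, "a = 1.02\nb = 2"); err != nil {
        log.Fatal(err)
    }
    fmt.Println(c.A, c.B) // 1.02 2
}
```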
diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go
index 38363ad1b..8682f470e 100644
--- a/vendor/github.com/hashicorp/hcl/decoder_test.go
+++ b/vendor/github.com/hashicorp/hcl/decoder_test.go
@@ -73,6 +73,7 @@ func TestDecode_interface(t *testing.T) {
false,
map[string]interface{}{
"a": 1.02,
+ "b": 2,
},
},
{
@@ -811,6 +812,7 @@ func TestDecode_intString(t *testing.T) {
func TestDecode_float32(t *testing.T) {
var value struct {
A float32 `hcl:"a"`
+ B float32 `hcl:"b"`
}
err := Decode(&value, testReadFile(t, "float.hcl"))
@@ -821,11 +823,15 @@ func TestDecode_float32(t *testing.T) {
if got, want := value.A, float32(1.02); got != want {
t.Fatalf("wrong result %#v; want %#v", got, want)
}
+ if got, want := value.B, float32(2); got != want {
+ t.Fatalf("wrong result %#v; want %#v", got, want)
+ }
}
func TestDecode_float64(t *testing.T) {
var value struct {
A float64 `hcl:"a"`
+ B float64 `hcl:"b"`
}
err := Decode(&value, testReadFile(t, "float.hcl"))
@@ -836,6 +842,9 @@ func TestDecode_float64(t *testing.T) {
if got, want := value.A, float64(1.02); got != want {
t.Fatalf("wrong result %#v; want %#v", got, want)
}
+ if got, want := value.B, float64(2); got != want {
+ t.Fatalf("wrong result %#v; want %#v", got, want)
+ }
}
func TestDecode_intStringAliased(t *testing.T) {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index b4881806e..098e1bc49 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -197,9 +197,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
keyStr = append(keyStr, k.Token.Text)
}
- return nil, fmt.Errorf(
- "key '%s' expected start of object ('{') or assignment ('=')",
- strings.Join(keyStr, " "))
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
}
// do a look-ahead for line comment
@@ -319,7 +322,10 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
- return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
}
o.List = l
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
index eed44e542..edf355e38 100644
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
@@ -1 +1,2 @@
a = 1.02
+b = 2
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
index a9d1ab4b0..580868043 100644
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
@@ -1,3 +1,4 @@
{
- "a": 1.02
+ "a": 1.02,
+ "b": 2
}
diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go
index d5c175e5b..e1e38fc94 100644
--- a/vendor/github.com/hashicorp/memberlist/memberlist.go
+++ b/vendor/github.com/hashicorp/memberlist/memberlist.go
@@ -127,7 +127,7 @@ func newMemberlist(conf *Config) (*Memberlist, error) {
return nt, nil
}
if strings.Contains(err.Error(), "address already in use") {
- logger.Printf("[DEBUG] Got bind error: %v", err)
+ logger.Printf("[DEBUG] memberlist: Got bind error: %v", err)
continue
}
}
@@ -154,7 +154,7 @@ func newMemberlist(conf *Config) (*Memberlist, error) {
port := nt.GetAutoBindPort()
conf.BindPort = port
conf.AdvertisePort = port
- logger.Printf("[DEBUG] Using dynamic bind port %d", port)
+ logger.Printf("[DEBUG] memberlist: Using dynamic bind port %d", port)
}
transport = nt
}
diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml
index 1a4656c53..452515c66 100644
--- a/vendor/github.com/lib/pq/.travis.yml
+++ b/vendor/github.com/lib/pq/.travis.yml
@@ -5,6 +5,7 @@ go:
- 1.6.x
- 1.7.x
- 1.8.x
+ - 1.9.x
- master
sudo: true
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
index 1725ab0d3..338a0bc18 100644
--- a/vendor/github.com/lib/pq/conn.go
+++ b/vendor/github.com/lib/pq/conn.go
@@ -706,7 +706,7 @@ func (noRows) RowsAffected() (int64, error) {
// Decides which column formats to use for a prepared statement. The input is
// an array of type oids, one element per result column.
-func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) {
+func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) {
if len(colTyps) == 0 {
return nil, colFmtDataAllText
}
@@ -718,8 +718,8 @@ func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, c
allBinary := true
allText := true
- for i, o := range colTyps {
- switch o {
+ for i, t := range colTyps {
+ switch t.OID {
// This is the list of types to use binary mode for when receiving them
// through a prepared statement. If a type appears in this list, it
// must also be implemented in binaryDecode in encode.go.
@@ -1155,7 +1155,7 @@ type stmt struct {
colNames []string
colFmts []format
colFmtData []byte
- colTyps []oid.Oid
+ colTyps []fieldDesc
paramTyps []oid.Oid
closed bool
}
@@ -1318,7 +1318,7 @@ type rows struct {
cn *conn
finish func()
colNames []string
- colTyps []oid.Oid
+ colTyps []fieldDesc
colFmts []format
done bool
rb readBuf
@@ -1406,7 +1406,7 @@ func (rs *rows) Next(dest []driver.Value) (err error) {
dest[i] = nil
continue
}
- dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i])
+ dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
}
return
case 'T':
@@ -1573,7 +1573,7 @@ func (cn *conn) readParseResponse() {
}
}
-func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) {
+func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) {
for {
t, r := cn.recv1()
switch t {
@@ -1599,7 +1599,7 @@ func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames [
}
}
-func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) {
+func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) {
t, r := cn.recv1()
switch t {
case 'T':
@@ -1695,31 +1695,33 @@ func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, co
}
}
-func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) {
+func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) {
n := r.int16()
colNames = make([]string, n)
- colTyps = make([]oid.Oid, n)
+ colTyps = make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
- colTyps[i] = r.oid()
- r.next(6)
+ colTyps[i].OID = r.oid()
+ colTyps[i].Len = r.int16()
+ colTyps[i].Mod = r.int32()
// format code not known when describing a statement; always 0
r.next(2)
}
return
}
-func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) {
+func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) {
n := r.int16()
colNames = make([]string, n)
colFmts = make([]format, n)
- colTyps = make([]oid.Oid, n)
+ colTyps = make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
- colTyps[i] = r.oid()
- r.next(6)
+ colTyps[i].OID = r.oid()
+ colTyps[i].Len = r.int16()
+ colTyps[i].Mod = r.int32()
colFmts[i] = format(r.int16())
}
return
diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go
index 88a322cda..3b0d365f2 100644
--- a/vendor/github.com/lib/pq/encode.go
+++ b/vendor/github.com/lib/pq/encode.go
@@ -367,8 +367,15 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro
timeSep := daySep + 3
day := p.mustAtoi(str, daySep+1, timeSep)
+ minLen := monSep + len("01-01") + 1
+
+ isBC := strings.HasSuffix(str, " BC")
+ if isBC {
+ minLen += 3
+ }
+
var hour, minute, second int
- if len(str) > monSep+len("01-01")+1 {
+ if len(str) > minLen {
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
p.expect(str, ':', minSep)
@@ -424,7 +431,8 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
- if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
+
+ if isBC {
isoYear = 1 - year
remainderIdx += 3
} else {
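The `minLen` adjustment above is what allows date-only BC values such as "0001-12-31 BC" to parse: previously the " BC" suffix made the string long enough to be mistaken for one carrying a time component. A hedged sketch using the exported `ParseTimestamp` (passing `time.UTC` here is just for illustration):

```go
package main

import (
    "fmt"
    "log"
    "time"

    "github.com/lib/pq"
)

func main() {
    ts, err := pq.ParseTimestamp(time.UTC, "0001-12-31 BC")
    if err != nil {
        log.Fatal(err)
    }
    // 1 BC maps to ISO year 0.
    fmt.Println(ts.Year(), ts.Month(), ts.Day()) // 0 December 31
}
```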
diff --git a/vendor/github.com/lib/pq/encode_test.go b/vendor/github.com/lib/pq/encode_test.go
index 3a0f7286e..837d45bec 100644
--- a/vendor/github.com/lib/pq/encode_test.go
+++ b/vendor/github.com/lib/pq/encode_test.go
@@ -37,6 +37,8 @@ var timeTests = []struct {
}{
{"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
{"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+ {"0001-12-31 BC", time.Date(0, time.December, 31, 0, 0, 0, 0, time.FixedZone("", 0))},
+ {"2001-02-03 BC", time.Date(-2000, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
{"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
@@ -86,15 +88,22 @@ func TestParseTs(t *testing.T) {
}
var timeErrorTests = []string{
+ "BC",
+ " BC",
"2001",
"2001-2-03",
"2001-02-3",
"2001-02-03 ",
+ "2001-02-03 B",
"2001-02-03 04",
"2001-02-03 04:",
"2001-02-03 04:05",
+ "2001-02-03 04:05 B",
+ "2001-02-03 04:05 BC",
"2001-02-03 04:05:",
"2001-02-03 04:05:6",
+ "2001-02-03 04:05:06 B",
+ "2001-02-03 04:05:06BC",
"2001-02-03 04:05:06.123 B",
}
diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go
index cd4aea808..7c634cdc5 100644
--- a/vendor/github.com/lib/pq/oid/gen.go
+++ b/vendor/github.com/lib/pq/oid/gen.go
@@ -10,10 +10,22 @@ import (
"log"
"os"
"os/exec"
+ "strings"
_ "github.com/lib/pq"
)
+// OID represent a postgres Object Identifier Type.
+type OID struct {
+ ID int
+ Type string
+}
+
+// Name returns an upper case version of the oid type.
+func (o OID) Name() string {
+ return strings.ToUpper(o.Type)
+}
+
func main() {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
@@ -30,6 +42,25 @@ func main() {
if err != nil {
log.Fatal(err)
}
+ rows, err := db.Query(`
+ SELECT typname, oid
+ FROM pg_type WHERE oid < 10000
+ ORDER BY oid;
+ `)
+ if err != nil {
+ log.Fatal(err)
+ }
+ oids := make([]*OID, 0)
+ for rows.Next() {
+ var oid OID
+ if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
+ log.Fatal(err)
+ }
+ oids = append(oids, &oid)
+ }
+ if err = rows.Err(); err != nil {
+ log.Fatal(err)
+ }
cmd := exec.Command("gofmt")
cmd.Stderr = os.Stderr
w, err := cmd.StdinPipe()
@@ -45,30 +76,18 @@ func main() {
if err != nil {
log.Fatal(err)
}
- fmt.Fprintln(w, "// generated by 'go run gen.go'; do not edit")
+ fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
fmt.Fprintln(w, "\npackage oid")
fmt.Fprintln(w, "const (")
- rows, err := db.Query(`
- SELECT typname, oid
- FROM pg_type WHERE oid < 10000
- ORDER BY oid;
- `)
- if err != nil {
- log.Fatal(err)
- }
- var name string
- var oid int
- for rows.Next() {
- err = rows.Scan(&name, &oid)
- if err != nil {
- log.Fatal(err)
- }
- fmt.Fprintf(w, "T_%s Oid = %d\n", name, oid)
- }
- if err = rows.Err(); err != nil {
- log.Fatal(err)
+ for _, oid := range oids {
+ fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
}
fmt.Fprintln(w, ")")
+ fmt.Fprintln(w, "var TypeName = map[Oid]string{")
+ for _, oid := range oids {
+ fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
+ }
+ fmt.Fprintln(w, "}")
w.Close()
cmd.Wait()
}
diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go
index a3390c23a..ecc84c2c8 100644
--- a/vendor/github.com/lib/pq/oid/types.go
+++ b/vendor/github.com/lib/pq/oid/types.go
@@ -1,4 +1,4 @@
-// generated by 'go run gen.go'; do not edit
+// Code generated by gen.go. DO NOT EDIT.
package oid
@@ -171,3 +171,173 @@ const (
T_regrole Oid = 4096
T__regrole Oid = 4097
)
+
+var TypeName = map[Oid]string{
+ T_bool: "BOOL",
+ T_bytea: "BYTEA",
+ T_char: "CHAR",
+ T_name: "NAME",
+ T_int8: "INT8",
+ T_int2: "INT2",
+ T_int2vector: "INT2VECTOR",
+ T_int4: "INT4",
+ T_regproc: "REGPROC",
+ T_text: "TEXT",
+ T_oid: "OID",
+ T_tid: "TID",
+ T_xid: "XID",
+ T_cid: "CID",
+ T_oidvector: "OIDVECTOR",
+ T_pg_ddl_command: "PG_DDL_COMMAND",
+ T_pg_type: "PG_TYPE",
+ T_pg_attribute: "PG_ATTRIBUTE",
+ T_pg_proc: "PG_PROC",
+ T_pg_class: "PG_CLASS",
+ T_json: "JSON",
+ T_xml: "XML",
+ T__xml: "_XML",
+ T_pg_node_tree: "PG_NODE_TREE",
+ T__json: "_JSON",
+ T_smgr: "SMGR",
+ T_index_am_handler: "INDEX_AM_HANDLER",
+ T_point: "POINT",
+ T_lseg: "LSEG",
+ T_path: "PATH",
+ T_box: "BOX",
+ T_polygon: "POLYGON",
+ T_line: "LINE",
+ T__line: "_LINE",
+ T_cidr: "CIDR",
+ T__cidr: "_CIDR",
+ T_float4: "FLOAT4",
+ T_float8: "FLOAT8",
+ T_abstime: "ABSTIME",
+ T_reltime: "RELTIME",
+ T_tinterval: "TINTERVAL",
+ T_unknown: "UNKNOWN",
+ T_circle: "CIRCLE",
+ T__circle: "_CIRCLE",
+ T_money: "MONEY",
+ T__money: "_MONEY",
+ T_macaddr: "MACADDR",
+ T_inet: "INET",
+ T__bool: "_BOOL",
+ T__bytea: "_BYTEA",
+ T__char: "_CHAR",
+ T__name: "_NAME",
+ T__int2: "_INT2",
+ T__int2vector: "_INT2VECTOR",
+ T__int4: "_INT4",
+ T__regproc: "_REGPROC",
+ T__text: "_TEXT",
+ T__tid: "_TID",
+ T__xid: "_XID",
+ T__cid: "_CID",
+ T__oidvector: "_OIDVECTOR",
+ T__bpchar: "_BPCHAR",
+ T__varchar: "_VARCHAR",
+ T__int8: "_INT8",
+ T__point: "_POINT",
+ T__lseg: "_LSEG",
+ T__path: "_PATH",
+ T__box: "_BOX",
+ T__float4: "_FLOAT4",
+ T__float8: "_FLOAT8",
+ T__abstime: "_ABSTIME",
+ T__reltime: "_RELTIME",
+ T__tinterval: "_TINTERVAL",
+ T__polygon: "_POLYGON",
+ T__oid: "_OID",
+ T_aclitem: "ACLITEM",
+ T__aclitem: "_ACLITEM",
+ T__macaddr: "_MACADDR",
+ T__inet: "_INET",
+ T_bpchar: "BPCHAR",
+ T_varchar: "VARCHAR",
+ T_date: "DATE",
+ T_time: "TIME",
+ T_timestamp: "TIMESTAMP",
+ T__timestamp: "_TIMESTAMP",
+ T__date: "_DATE",
+ T__time: "_TIME",
+ T_timestamptz: "TIMESTAMPTZ",
+ T__timestamptz: "_TIMESTAMPTZ",
+ T_interval: "INTERVAL",
+ T__interval: "_INTERVAL",
+ T__numeric: "_NUMERIC",
+ T_pg_database: "PG_DATABASE",
+ T__cstring: "_CSTRING",
+ T_timetz: "TIMETZ",
+ T__timetz: "_TIMETZ",
+ T_bit: "BIT",
+ T__bit: "_BIT",
+ T_varbit: "VARBIT",
+ T__varbit: "_VARBIT",
+ T_numeric: "NUMERIC",
+ T_refcursor: "REFCURSOR",
+ T__refcursor: "_REFCURSOR",
+ T_regprocedure: "REGPROCEDURE",
+ T_regoper: "REGOPER",
+ T_regoperator: "REGOPERATOR",
+ T_regclass: "REGCLASS",
+ T_regtype: "REGTYPE",
+ T__regprocedure: "_REGPROCEDURE",
+ T__regoper: "_REGOPER",
+ T__regoperator: "_REGOPERATOR",
+ T__regclass: "_REGCLASS",
+ T__regtype: "_REGTYPE",
+ T_record: "RECORD",
+ T_cstring: "CSTRING",
+ T_any: "ANY",
+ T_anyarray: "ANYARRAY",
+ T_void: "VOID",
+ T_trigger: "TRIGGER",
+ T_language_handler: "LANGUAGE_HANDLER",
+ T_internal: "INTERNAL",
+ T_opaque: "OPAQUE",
+ T_anyelement: "ANYELEMENT",
+ T__record: "_RECORD",
+ T_anynonarray: "ANYNONARRAY",
+ T_pg_authid: "PG_AUTHID",
+ T_pg_auth_members: "PG_AUTH_MEMBERS",
+ T__txid_snapshot: "_TXID_SNAPSHOT",
+ T_uuid: "UUID",
+ T__uuid: "_UUID",
+ T_txid_snapshot: "TXID_SNAPSHOT",
+ T_fdw_handler: "FDW_HANDLER",
+ T_pg_lsn: "PG_LSN",
+ T__pg_lsn: "_PG_LSN",
+ T_tsm_handler: "TSM_HANDLER",
+ T_anyenum: "ANYENUM",
+ T_tsvector: "TSVECTOR",
+ T_tsquery: "TSQUERY",
+ T_gtsvector: "GTSVECTOR",
+ T__tsvector: "_TSVECTOR",
+ T__gtsvector: "_GTSVECTOR",
+ T__tsquery: "_TSQUERY",
+ T_regconfig: "REGCONFIG",
+ T__regconfig: "_REGCONFIG",
+ T_regdictionary: "REGDICTIONARY",
+ T__regdictionary: "_REGDICTIONARY",
+ T_jsonb: "JSONB",
+ T__jsonb: "_JSONB",
+ T_anyrange: "ANYRANGE",
+ T_event_trigger: "EVENT_TRIGGER",
+ T_int4range: "INT4RANGE",
+ T__int4range: "_INT4RANGE",
+ T_numrange: "NUMRANGE",
+ T__numrange: "_NUMRANGE",
+ T_tsrange: "TSRANGE",
+ T__tsrange: "_TSRANGE",
+ T_tstzrange: "TSTZRANGE",
+ T__tstzrange: "_TSTZRANGE",
+ T_daterange: "DATERANGE",
+ T__daterange: "_DATERANGE",
+ T_int8range: "INT8RANGE",
+ T__int8range: "_INT8RANGE",
+ T_pg_shseclabel: "PG_SHSECLABEL",
+ T_regnamespace: "REGNAMESPACE",
+ T__regnamespace: "_REGNAMESPACE",
+ T_regrole: "REGROLE",
+ T__regrole: "_REGROLE",
+}
diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go
new file mode 100644
index 000000000..c6aa5b9a3
--- /dev/null
+++ b/vendor/github.com/lib/pq/rows.go
@@ -0,0 +1,93 @@
+package pq
+
+import (
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/lib/pq/oid"
+)
+
+const headerSize = 4
+
+type fieldDesc struct {
+ // The object ID of the data type.
+ OID oid.Oid
+ // The data type size (see pg_type.typlen).
+ // Note that negative values denote variable-width types.
+ Len int
+ // The type modifier (see pg_attribute.atttypmod).
+ // The meaning of the modifier is type-specific.
+ Mod int
+}
+
+func (fd fieldDesc) Type() reflect.Type {
+ switch fd.OID {
+ case oid.T_int8:
+ return reflect.TypeOf(int64(0))
+ case oid.T_int4:
+ return reflect.TypeOf(int32(0))
+ case oid.T_int2:
+ return reflect.TypeOf(int16(0))
+ case oid.T_varchar, oid.T_text:
+ return reflect.TypeOf("")
+ case oid.T_bool:
+ return reflect.TypeOf(false)
+ case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
+ return reflect.TypeOf(time.Time{})
+ case oid.T_bytea:
+ return reflect.TypeOf([]byte(nil))
+ default:
+ return reflect.TypeOf(new(interface{})).Elem()
+ }
+}
+
+func (fd fieldDesc) Name() string {
+ return oid.TypeName[fd.OID]
+}
+
+func (fd fieldDesc) Length() (length int64, ok bool) {
+ switch fd.OID {
+ case oid.T_text, oid.T_bytea:
+ return math.MaxInt64, true
+ case oid.T_varchar, oid.T_bpchar:
+ return int64(fd.Mod - headerSize), true
+ default:
+ return 0, false
+ }
+}
+
+func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
+ switch fd.OID {
+ case oid.T_numeric, oid.T__numeric:
+ mod := fd.Mod - headerSize
+ precision = int64((mod >> 16) & 0xffff)
+ scale = int64(mod & 0xffff)
+ return precision, scale, true
+ default:
+ return 0, 0, false
+ }
+}
+
+// ColumnTypeScanType returns the value type that can be used to scan types into.
+func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
+ return rs.colTyps[index].Type()
+}
+
+// ColumnTypeDatabaseTypeName return the database system type name.
+func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
+ return rs.colTyps[index].Name()
+}
+
+// ColumnTypeLength returns the length of the column type if the column is a
+// variable length type. If the column is not a variable length type ok
+// should return false.
+func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
+ return rs.colTyps[index].Length()
+}
+
+// ColumnTypePrecisionScale should return the precision and scale for decimal
+// types. If not applicable, ok should be false.
+func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
+ return rs.colTyps[index].PrecisionScale()
+}
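With `fieldDesc` carrying the OID, length and type modifier, the driver can now satisfy the Go 1.8 `database/sql` ColumnType accessors defined above. From application code that might be used roughly as follows (a sketch; the DSN and query are placeholders):

```go
package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/lib/pq"
)

func main() {
    db, err := sql.Open("postgres", "dbname=test sslmode=disable") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    rows, err := db.Query("SELECT 1 AS a, 'bar'::text AS b, 1.28::numeric(9,2) AS c")
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    cols, err := rows.ColumnTypes()
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range cols {
        length, hasLength := c.Length()
        prec, scale, hasDec := c.DecimalSize()
        fmt.Println(c.Name(), c.DatabaseTypeName(), c.ScanType(),
            length, hasLength, prec, scale, hasDec)
    }
}
```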
diff --git a/vendor/github.com/lib/pq/rows_test.go b/vendor/github.com/lib/pq/rows_test.go
new file mode 100644
index 000000000..3033bc01b
--- /dev/null
+++ b/vendor/github.com/lib/pq/rows_test.go
@@ -0,0 +1,220 @@
+// +build go1.8
+
+package pq
+
+import (
+ "math"
+ "reflect"
+ "testing"
+
+ "github.com/lib/pq/oid"
+)
+
+func TestDataTypeName(t *testing.T) {
+ tts := []struct {
+ typ oid.Oid
+ name string
+ }{
+ {oid.T_int8, "INT8"},
+ {oid.T_int4, "INT4"},
+ {oid.T_int2, "INT2"},
+ {oid.T_varchar, "VARCHAR"},
+ {oid.T_text, "TEXT"},
+ {oid.T_bool, "BOOL"},
+ {oid.T_numeric, "NUMERIC"},
+ {oid.T_date, "DATE"},
+ {oid.T_time, "TIME"},
+ {oid.T_timetz, "TIMETZ"},
+ {oid.T_timestamp, "TIMESTAMP"},
+ {oid.T_timestamptz, "TIMESTAMPTZ"},
+ {oid.T_bytea, "BYTEA"},
+ }
+
+ for i, tt := range tts {
+ dt := fieldDesc{OID: tt.typ}
+ if name := dt.Name(); name != tt.name {
+ t.Errorf("(%d) got: %s want: %s", i, name, tt.name)
+ }
+ }
+}
+
+func TestDataType(t *testing.T) {
+ tts := []struct {
+ typ oid.Oid
+ kind reflect.Kind
+ }{
+ {oid.T_int8, reflect.Int64},
+ {oid.T_int4, reflect.Int32},
+ {oid.T_int2, reflect.Int16},
+ {oid.T_varchar, reflect.String},
+ {oid.T_text, reflect.String},
+ {oid.T_bool, reflect.Bool},
+ {oid.T_date, reflect.Struct},
+ {oid.T_time, reflect.Struct},
+ {oid.T_timetz, reflect.Struct},
+ {oid.T_timestamp, reflect.Struct},
+ {oid.T_timestamptz, reflect.Struct},
+ {oid.T_bytea, reflect.Slice},
+ }
+
+ for i, tt := range tts {
+ dt := fieldDesc{OID: tt.typ}
+ if kind := dt.Type().Kind(); kind != tt.kind {
+ t.Errorf("(%d) got: %s want: %s", i, kind, tt.kind)
+ }
+ }
+}
+
+func TestDataTypeLength(t *testing.T) {
+ tts := []struct {
+ typ oid.Oid
+ len int
+ mod int
+ length int64
+ ok bool
+ }{
+ {oid.T_int4, 0, -1, 0, false},
+ {oid.T_varchar, 65535, 9, 5, true},
+ {oid.T_text, 65535, -1, math.MaxInt64, true},
+ {oid.T_bytea, 65535, -1, math.MaxInt64, true},
+ }
+
+ for i, tt := range tts {
+ dt := fieldDesc{OID: tt.typ, Len: tt.len, Mod: tt.mod}
+ if l, k := dt.Length(); k != tt.ok || l != tt.length {
+ t.Errorf("(%d) got: %d, %t want: %d, %t", i, l, k, tt.length, tt.ok)
+ }
+ }
+}
+
+func TestDataTypePrecisionScale(t *testing.T) {
+ tts := []struct {
+ typ oid.Oid
+ mod int
+ precision, scale int64
+ ok bool
+ }{
+ {oid.T_int4, -1, 0, 0, false},
+ {oid.T_numeric, 589830, 9, 2, true},
+ {oid.T_text, -1, 0, 0, false},
+ }
+
+ for i, tt := range tts {
+ dt := fieldDesc{OID: tt.typ, Mod: tt.mod}
+ p, s, k := dt.PrecisionScale()
+ if k != tt.ok {
+ t.Errorf("(%d) got: %t want: %t", i, k, tt.ok)
+ }
+ if p != tt.precision {
+ t.Errorf("(%d) wrong precision got: %d want: %d", i, p, tt.precision)
+ }
+ if s != tt.scale {
+ t.Errorf("(%d) wrong scale got: %d want: %d", i, s, tt.scale)
+ }
+ }
+}
+
+func TestRowsColumnTypes(t *testing.T) {
+ columnTypesTests := []struct {
+ Name string
+ TypeName string
+ Length struct {
+ Len int64
+ OK bool
+ }
+ DecimalSize struct {
+ Precision int64
+ Scale int64
+ OK bool
+ }
+ ScanType reflect.Type
+ }{
+ {
+ Name: "a",
+ TypeName: "INT4",
+ Length: struct {
+ Len int64
+ OK bool
+ }{
+ Len: 0,
+ OK: false,
+ },
+ DecimalSize: struct {
+ Precision int64
+ Scale int64
+ OK bool
+ }{
+ Precision: 0,
+ Scale: 0,
+ OK: false,
+ },
+ ScanType: reflect.TypeOf(int32(0)),
+ }, {
+ Name: "bar",
+ TypeName: "TEXT",
+ Length: struct {
+ Len int64
+ OK bool
+ }{
+ Len: math.MaxInt64,
+ OK: true,
+ },
+ DecimalSize: struct {
+ Precision int64
+ Scale int64
+ OK bool
+ }{
+ Precision: 0,
+ Scale: 0,
+ OK: false,
+ },
+ ScanType: reflect.TypeOf(""),
+ },
+ }
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar, 1.28::numeric(9, 2) AS dec")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ columns, err := rows.ColumnTypes()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(columns) != 3 {
+ t.Errorf("expected 3 columns found %d", len(columns))
+ }
+
+ for i, tt := range columnTypesTests {
+ c := columns[i]
+ if c.Name() != tt.Name {
+ t.Errorf("(%d) got: %s, want: %s", i, c.Name(), tt.Name)
+ }
+ if c.DatabaseTypeName() != tt.TypeName {
+ t.Errorf("(%d) got: %s, want: %s", i, c.DatabaseTypeName(), tt.TypeName)
+ }
+ l, ok := c.Length()
+ if l != tt.Length.Len {
+ t.Errorf("(%d) got: %d, want: %d", i, l, tt.Length.Len)
+ }
+ if ok != tt.Length.OK {
+ t.Errorf("(%d) got: %t, want: %t", i, ok, tt.Length.OK)
+ }
+ p, s, ok := c.DecimalSize()
+ if p != tt.DecimalSize.Precision {
+ t.Errorf("(%d) got: %d, want: %d", i, p, tt.DecimalSize.Precision)
+ }
+ if s != tt.DecimalSize.Scale {
+ t.Errorf("(%d) got: %d, want: %d", i, s, tt.DecimalSize.Scale)
+ }
+ if ok != tt.DecimalSize.OK {
+ t.Errorf("(%d) got: %t, want: %t", i, ok, tt.DecimalSize.OK)
+ }
+ if c.ScanType() != tt.ScanType {
+ t.Errorf("(%d) got: %v, want: %v", i, c.ScanType(), tt.ScanType)
+ }
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml
index 60436b202..ab9803902 100644
--- a/vendor/github.com/magiconair/properties/.travis.yml
+++ b/vendor/github.com/magiconair/properties/.travis.yml
@@ -5,4 +5,5 @@ go:
- 1.6.x
- 1.7.x
- 1.8.x
+ - 1.9.x
- tip
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
index 4905fec99..ebd1bcd32 100644
--- a/vendor/github.com/magiconair/properties/CHANGELOG.md
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -1,5 +1,10 @@
## Changelog
+### Unreleased
+
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+ Thanks to @mgurov for the fix.
+
### [1.7.3](https://github.com/magiconair/properties/tags/v1.7.3) - 10 Jul 2017
* [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
index 4f3d5a458..85bb18618 100644
--- a/vendor/github.com/magiconair/properties/properties.go
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -511,6 +511,9 @@ func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
if p.DisableExpansion {
prev, ok = p.Get(key)
p.m[key] = value
+ if !ok {
+ p.k = append(p.k, key)
+ }
return prev, ok, nil
}
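The appended key fixes key tracking: previously, a key first set while `DisableExpansion` was enabled stored its value but never appeared in `Keys()` or `String()`. A brief sketch of the corrected behaviour, mirroring the new test below:

```go
package main

import (
    "fmt"

    "github.com/magiconair/properties"
)

func main() {
    p := properties.NewProperties()
    p.DisableExpansion = true
    p.MustSet("greeting", "hello ${name}") // stored literally, never expanded

    fmt.Println(p.Keys()) // [greeting] -- the new key is now tracked as well
    fmt.Print(p.String()) // greeting = hello ${name}
}
```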
diff --git a/vendor/github.com/magiconair/properties/properties_test.go b/vendor/github.com/magiconair/properties/properties_test.go
index 0eac1f492..7e92618e0 100644
--- a/vendor/github.com/magiconair/properties/properties_test.go
+++ b/vendor/github.com/magiconair/properties/properties_test.go
@@ -458,6 +458,19 @@ func TestDisableExpansion(t *testing.T) {
assert.Equal(t, p.MustGet("keyB"), "${keyA}")
}
+func TestDisableExpansionStillUpdatesKeys(t *testing.T) {
+ p := NewProperties()
+ p.MustSet("p1", "a")
+ assert.Equal(t, p.Keys(), []string{"p1"})
+ assert.Equal(t, p.String(), "p1 = a\n")
+
+ p.DisableExpansion = true
+ p.MustSet("p2", "b")
+
+ assert.Equal(t, p.Keys(), []string{"p1", "p2"})
+ assert.Equal(t, p.String(), "p1 = a\np2 = b\n")
+}
+
func TestMustGet(t *testing.T) {
input := "key = value\nkey2 = ghi"
p := mustParse(t, input)
diff --git a/vendor/github.com/mattermost/html2text/LICENSE b/vendor/github.com/mattermost/html2text/LICENSE
index 24dc4abec..1f2423ecb 100644
--- a/vendor/github.com/mattermost/html2text/LICENSE
+++ b/vendor/github.com/mattermost/html2text/LICENSE
@@ -1,6 +1,7 @@
The MIT License (MIT)
Copyright (c) 2015 Jay Taylor
+Modified work: Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS
index f77e8a895..5903779d8 100644
--- a/vendor/github.com/miekg/dns/CONTRIBUTORS
+++ b/vendor/github.com/miekg/dns/CONTRIBUTORS
@@ -7,3 +7,4 @@ Marek Majkowski
Peter van Dijk
Omri Bahumi
Alex Sergeyev
+James Hartig
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
index 1c14a19d8..359a0ab8f 100644
--- a/vendor/github.com/miekg/dns/client.go
+++ b/vendor/github.com/miekg/dns/client.go
@@ -9,6 +9,7 @@ import (
"encoding/binary"
"io"
"net"
+ "strings"
"time"
)
@@ -27,11 +28,15 @@ type Conn struct {
// A Client defines parameters for a DNS client.
type Client struct {
- Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
- UDPSize uint16 // minimum receive buffer for UDP messages
- TLSConfig *tls.Config // TLS connection configuration
- Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero
- DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ TLSConfig *tls.Config // TLS connection configuration
+ Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
+ // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
+ // WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
+ // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
+ Timeout time.Duration
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
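The new `Dialer` field replaces the per-call timeout plumbing removed further down: callers who need a dial timeout or a local address now configure a `net.Dialer` on the `Client`. A hedged sketch (the query name and resolver address are placeholders; a `LocalAddr` could be set on the dialer as well):

```go
package main

import (
    "fmt"
    "log"
    "net"
    "time"

    "github.com/miekg/dns"
)

func main() {
    c := &dns.Client{
        Net:    "udp",
        Dialer: &net.Dialer{Timeout: 2 * time.Second},
    }

    m := new(dns.Msg)
    m.SetQuestion("example.org.", dns.TypeA)

    r, rtt, err := c.Exchange(m, "198.51.100.1:53") // placeholder resolver address
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(rtt, dns.RcodeToString[r.Rcode])
}
```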
@@ -44,91 +49,74 @@ type Client struct {
// will it fall back to TCP in case of truncation.
// See client.Exchange for more information on setting larger buffer sizes.
func Exchange(m *Msg, a string) (r *Msg, err error) {
- var co *Conn
- co, err = DialTimeout("udp", a, dnsTimeout)
- if err != nil {
- return nil, err
- }
-
- defer co.Close()
+ client := Client{Net: "udp"}
+ r, _, err = client.Exchange(m, a)
+ return r, err
+}
- opt := m.IsEdns0()
- // If EDNS0 is used use that for size.
- if opt != nil && opt.UDPSize() >= MinMsgSize {
- co.UDPSize = opt.UDPSize()
+func (c *Client) dialTimeout() time.Duration {
+ if c.Timeout != 0 {
+ return c.Timeout
}
-
- co.SetWriteDeadline(time.Now().Add(dnsTimeout))
- if err = co.WriteMsg(m); err != nil {
- return nil, err
+ if c.DialTimeout != 0 {
+ return c.DialTimeout
}
+ return dnsTimeout
+}
- co.SetReadDeadline(time.Now().Add(dnsTimeout))
- r, err = co.ReadMsg()
- if err == nil && r.Id != m.Id {
- err = ErrId
+func (c *Client) readTimeout() time.Duration {
+ if c.ReadTimeout != 0 {
+ return c.ReadTimeout
}
- return r, err
+ return dnsTimeout
}
-// ExchangeContext performs a synchronous UDP query, like Exchange. It
-// additionally obeys deadlines from the passed Context.
-func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
- // Combine context deadline with built-in timeout. Context chooses whichever
- // is sooner.
- timeoutCtx, cancel := context.WithTimeout(ctx, dnsTimeout)
- defer cancel()
- deadline, _ := timeoutCtx.Deadline()
-
- co := new(Conn)
- dialer := net.Dialer{}
- co.Conn, err = dialer.DialContext(timeoutCtx, "udp", a)
- if err != nil {
- return nil, err
+func (c *Client) writeTimeout() time.Duration {
+ if c.WriteTimeout != 0 {
+ return c.WriteTimeout
}
+ return dnsTimeout
+}
- defer co.Conn.Close()
-
- opt := m.IsEdns0()
- // If EDNS0 is used use that for size.
- if opt != nil && opt.UDPSize() >= MinMsgSize {
- co.UDPSize = opt.UDPSize()
+func (c *Client) Dial(address string) (conn *Conn, err error) {
+ // create a new dialer with the appropriate timeout
+ var d net.Dialer
+ if c.Dialer == nil {
+ d = net.Dialer{}
+ } else {
+ d = net.Dialer(*c.Dialer)
}
+ d.Timeout = c.getTimeoutForRequest(c.writeTimeout())
- co.SetWriteDeadline(deadline)
- if err = co.WriteMsg(m); err != nil {
- return nil, err
- }
+ network := "udp"
+ useTLS := false
- co.SetReadDeadline(deadline)
- r, err = co.ReadMsg()
- if err == nil && r.Id != m.Id {
- err = ErrId
+ switch c.Net {
+ case "tcp-tls":
+ network = "tcp"
+ useTLS = true
+ case "tcp4-tls":
+ network = "tcp4"
+ useTLS = true
+ case "tcp6-tls":
+ network = "tcp6"
+ useTLS = true
+ default:
+ if c.Net != "" {
+ network = c.Net
+ }
}
- return r, err
-}
-// ExchangeConn performs a synchronous query. It sends the message m via the connection
-// c and waits for a reply. The connection c is not closed by ExchangeConn.
-// This function is going away, but can easily be mimicked:
-//
-// co := &dns.Conn{Conn: c} // c is your net.Conn
-// co.WriteMsg(m)
-// in, _ := co.ReadMsg()
-// co.Close()
-//
-func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
- println("dns: this function is deprecated")
- co := new(Conn)
- co.Conn = c
- if err = co.WriteMsg(m); err != nil {
- return nil, err
+ conn = new(Conn)
+ if useTLS {
+ conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
+ } else {
+ conn.Conn, err = d.Dial(network, address)
}
- r, err = co.ReadMsg()
- if err == nil && r.Id != m.Id {
- err = ErrId
+ if err != nil {
+ return nil, err
}
- return r, err
+ return conn, nil
}
// Exchange performs a synchronous query. It sends the message m to the address
@@ -142,22 +130,14 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
// It is up to the caller to create a message that allows for larger responses to be
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit
-// of 512 bytes.
-func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
- return c.ExchangeContext(context.Background(), m, a)
-}
-
-// ExchangeContext acts like Exchange, but honors the deadline on the provided
-// context, if present. If there is both a context deadline and a configured
-// timeout on the client, the earliest of the two takes effect.
-func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (
- r *Msg,
- rtt time.Duration,
- err error) {
+// of 512 bytes
+// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
+// attribute appropriately
+func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
- return c.exchange(ctx, m, a)
+ return c.exchange(m, address)
}
- // This adds a bunch of garbage, TODO(miek).
+
t := "nop"
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
t = t1
@@ -167,75 +147,18 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (
cl = cl1
}
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
- return c.exchange(ctx, m, a)
+ return c.exchange(m, address)
})
if r != nil && shared {
r = r.Copy()
}
- if err != nil {
- return r, rtt, err
- }
- return r, rtt, nil
-}
-
-func (c *Client) dialTimeout() time.Duration {
- if c.Timeout != 0 {
- return c.Timeout
- }
- if c.DialTimeout != 0 {
- return c.DialTimeout
- }
- return dnsTimeout
+ return r, rtt, err
}
-func (c *Client) readTimeout() time.Duration {
- if c.ReadTimeout != 0 {
- return c.ReadTimeout
- }
- return dnsTimeout
-}
-
-func (c *Client) writeTimeout() time.Duration {
- if c.WriteTimeout != 0 {
- return c.WriteTimeout
- }
- return dnsTimeout
-}
-
-func (c *Client) exchange(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var co *Conn
- network := "udp"
- tls := false
-
- switch c.Net {
- case "tcp-tls":
- network = "tcp"
- tls = true
- case "tcp4-tls":
- network = "tcp4"
- tls = true
- case "tcp6-tls":
- network = "tcp6"
- tls = true
- default:
- if c.Net != "" {
- network = c.Net
- }
- }
-
- var deadline time.Time
- if c.Timeout != 0 {
- deadline = time.Now().Add(c.Timeout)
- }
-
- dialDeadline := deadlineOrTimeoutOrCtx(ctx, deadline, c.dialTimeout())
- dialTimeout := dialDeadline.Sub(time.Now())
- if tls {
- co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, dialTimeout)
- } else {
- co, err = DialTimeout(network, a, dialTimeout)
- }
+ co, err = c.Dial(a)
if err != nil {
return nil, 0, err
@@ -253,12 +176,13 @@ func (c *Client) exchange(ctx context.Context, m *Msg, a string) (r *Msg, rtt ti
}
co.TsigSecret = c.TsigSecret
- co.SetWriteDeadline(deadlineOrTimeoutOrCtx(ctx, deadline, c.writeTimeout()))
+ // write with the appropriate write timeout
+ co.SetWriteDeadline(time.Now().Add(c.getTimeoutForRequest(c.writeTimeout())))
if err = co.WriteMsg(m); err != nil {
return nil, 0, err
}
- co.SetReadDeadline(deadlineOrTimeoutOrCtx(ctx, deadline, c.readTimeout()))
+ co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
@@ -352,7 +276,7 @@ func tcpMsgLen(t io.Reader) (int, error) {
return 0, err
}
- // As seen with my local router/switch, retursn 1 byte on the above read,
+ // As seen with my local router/switch, returns 1 byte on the above read,
// resulting a a ShortRead. Just write it out (instead of loop) and read the
// other byte.
if n == 1 {
@@ -467,6 +391,24 @@ func (co *Conn) Write(p []byte) (n int, err error) {
return n, err
}
+// Return the appropriate timeout for a specific request
+func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
+ var requestTimeout time.Duration
+ if c.Timeout != 0 {
+ requestTimeout = c.Timeout
+ } else {
+ requestTimeout = timeout
+ }
+ // net.Dialer.Timeout has priority if smaller than the timeouts computed so
+ // far
+ if c.Dialer != nil && c.Dialer.Timeout != 0 {
+ if c.Dialer.Timeout < requestTimeout {
+ requestTimeout = c.Dialer.Timeout
+ }
+ }
+ return requestTimeout
+}
+
// Dial connects to the address on the named network.
func Dial(network, address string) (conn *Conn, err error) {
conn = new(Conn)
@@ -477,10 +419,44 @@ func Dial(network, address string) (conn *Conn, err error) {
return conn, nil
}
+// ExchangeContext performs a synchronous UDP query, like Exchange. It
+// additionally obeys deadlines from the passed Context.
+func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
+ client := Client{Net: "udp"}
+ r, _, err = client.ExchangeContext(ctx, m, a)
+ // ignoring rtt to leave the original ExchangeContext API unchanged, but
+ // this function will go away
+ return r, err
+}
+
+// ExchangeConn performs a synchronous query. It sends the message m via the connection
+// c and waits for a reply. The connection c is not closed by ExchangeConn.
+// This function is going away, but can easily be mimicked:
+//
+// co := &dns.Conn{Conn: c} // c is your net.Conn
+// co.WriteMsg(m)
+// in, _ := co.ReadMsg()
+// co.Close()
+//
+func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
+ println("dns: ExchangeConn: this function is deprecated")
+ co := new(Conn)
+ co.Conn = c
+ if err = co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, err
+}
+
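The mimic from the comment above, written out as a complete sketch; exchangeOverConn and the dial target are illustrative names, not part of the library:

    package main

    import (
        "fmt"
        "net"
        "time"

        "github.com/miekg/dns"
    )

    // exchangeOverConn follows the pattern suggested in the ExchangeConn comment.
    // The caller still owns and closes the net.Conn.
    func exchangeOverConn(c net.Conn, m *dns.Msg) (*dns.Msg, error) {
        co := &dns.Conn{Conn: c}
        if err := co.WriteMsg(m); err != nil {
            return nil, err
        }
        r, err := co.ReadMsg()
        if err == nil && r.Id != m.Id {
            err = dns.ErrId
        }
        return r, err
    }

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.com.", dns.TypeA)
        c, err := net.DialTimeout("udp", "192.0.2.1:53", 2*time.Second)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer c.Close()
        r, err := exchangeOverConn(c, m)
        fmt.Println(r, err)
    }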
// DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
- conn = new(Conn)
- conn.Conn, err = net.DialTimeout(network, address, timeout)
+
+ client := Client{Net: "udp", Dialer: &net.Dialer{Timeout: timeout}}
+ conn, err = client.Dial(address)
if err != nil {
return nil, err
}
@@ -489,8 +465,12 @@ func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, er
// DialWithTLS connects to the address on the named network with TLS.
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
- conn = new(Conn)
- conn.Conn, err = tls.Dial(network, address, tlsConfig)
+ if !strings.HasSuffix(network, "-tls") {
+ network += "-tls"
+ }
+ client := Client{Net: network, TLSConfig: tlsConfig}
+ conn, err = client.Dial(address)
+
if err != nil {
return nil, err
}
@@ -499,33 +479,29 @@ func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, er
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
- var dialer net.Dialer
- dialer.Timeout = timeout
-
- conn = new(Conn)
- conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig)
+ if !strings.HasSuffix(network, "-tls") {
+ network += "-tls"
+ }
+ client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
+ conn, err = client.Dial(address)
if err != nil {
return nil, err
}
return conn, nil
}
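Both TLS dialers now just configure a Client with a "-tls" network, so callers can do the same directly; a sketch assuming a DNS-over-TLS resolver on port 853 (the address and ServerName are placeholders):

    package main

    import (
        "crypto/tls"
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.com.", dns.TypeA)

        c := &dns.Client{
            Net:       "tcp-tls",
            TLSConfig: &tls.Config{ServerName: "dns.example.net"},
        }
        r, rtt, err := c.Exchange(m, "192.0.2.1:853")
        if err != nil {
            fmt.Println("exchange failed:", err)
            return
        }
        fmt.Println(rtt, r.Rcode)
    }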
-// deadlineOrTimeout chooses between the provided deadline and timeout
-// by always preferring the deadline so long as it's non-zero (regardless
-// of which is bigger), and returns the equivalent deadline value.
-func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time {
- if deadline.IsZero() {
- return time.Now().Add(timeout)
- }
- return deadline
-}
-
-// deadlineOrTimeoutOrCtx returns the earliest of: a context deadline, or the
-// output of deadlineOrtimeout.
-func deadlineOrTimeoutOrCtx(ctx context.Context, deadline time.Time, timeout time.Duration) time.Time {
- result := deadlineOrTimeout(deadline, timeout)
- if ctxDeadline, ok := ctx.Deadline(); ok && ctxDeadline.Before(result) {
- result = ctxDeadline
+// ExchangeContext acts like Exchange, but honors the deadline on the provided
+// context, if present. If there is both a context deadline and a configured
+// timeout on the client, the earliest of the two takes effect.
+func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ var timeout time.Duration
+ if deadline, ok := ctx.Deadline(); !ok {
+ timeout = 0
+ } else {
+ timeout = deadline.Sub(time.Now())
}
- return result
+ // not passing the context to the underlying calls, as the API does not support
+ // context. For timeouts you should set up Client.Dialer and call Client.Exchange.
+ c.Dialer = &net.Dialer{Timeout: timeout}
+ return c.Exchange(m, a)
}
diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go
index 73083dbaf..3ff619cfa 100644
--- a/vendor/github.com/miekg/dns/client_test.go
+++ b/vendor/github.com/miekg/dns/client_test.go
@@ -11,6 +11,29 @@ import (
"time"
)
+func TestDialUDP(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("[::1]:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := new(Client)
+ conn, err := c.Dial(addrstr)
+ if err != nil {
+ t.Fatalf("failed to dial: %v", err)
+ }
+ if conn == nil {
+ t.Fatalf("conn is nil")
+ }
+}
+
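For completeness, a sketch of driving the new Client.Dial by hand, mirroring the Dial/WriteMsg/ReadMsg sequence in client.go above; the address and the two-second deadlines are placeholders:

    package main

    import (
        "fmt"
        "time"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.com.", dns.TypeA)

        c := new(dns.Client)
        conn, err := c.Dial("192.0.2.1:53")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer conn.Close()

        // Deadlines are left to the caller via the embedded net.Conn.
        conn.SetWriteDeadline(time.Now().Add(2 * time.Second))
        if err := conn.WriteMsg(m); err != nil {
            fmt.Println(err)
            return
        }
        conn.SetReadDeadline(time.Now().Add(2 * time.Second))
        r, err := conn.ReadMsg()
        fmt.Println(r, err)
    }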
func TestClientSync(t *testing.T) {
HandleFunc("miek.nl.", HelloServer)
defer HandleRemove("miek.nl.")
@@ -27,9 +50,12 @@ func TestClientSync(t *testing.T) {
c := new(Client)
r, _, err := c.Exchange(m, addrstr)
if err != nil {
- t.Errorf("failed to exchange: %v", err)
+ t.Fatalf("failed to exchange: %v", err)
}
- if r != nil && r.Rcode != RcodeSuccess {
+ if r == nil {
+ t.Fatal("response is nil")
+ }
+ if r.Rcode != RcodeSuccess {
t.Errorf("failed to get an valid answer\n%v", r)
}
// And now with plain Exchange().
@@ -42,7 +68,42 @@ func TestClientSync(t *testing.T) {
}
}
-func TestClientTLSSync(t *testing.T) {
+func TestClientLocalAddress(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServerEchoAddrPort)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := new(Client)
+ laddr := net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 12345, Zone: ""}
+ c.Dialer = &net.Dialer{LocalAddr: &laddr}
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil {
+ t.Errorf("failed to exchange: %v", err)
+ }
+ if r != nil && r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get a valid answer\n%v", r)
+ }
+ if len(r.Extra) != 1 {
+ t.Errorf("failed to get additional answers\n%v", r)
+ }
+ txt := r.Extra[0].(*TXT)
+ if txt == nil {
+ t.Errorf("invalid TXT response\n%v", txt)
+ }
+ if len(txt.Txt) != 1 || txt.Txt[0] != "127.0.0.1:12345" {
+ t.Errorf("invalid TXT response\n%v", txt.Txt)
+ }
+}
+
+func TestClientTLSSyncV4(t *testing.T) {
HandleFunc("miek.nl.", HelloServer)
defer HandleRemove("miek.nl.")
@@ -65,6 +126,8 @@ func TestClientTLSSync(t *testing.T) {
m.SetQuestion("miek.nl.", TypeSOA)
c := new(Client)
+
+ // test tcp-tls
c.Net = "tcp-tls"
c.TLSConfig = &tls.Config{
InsecureSkipVerify: true,
@@ -72,9 +135,88 @@ func TestClientTLSSync(t *testing.T) {
r, _, err := c.Exchange(m, addrstr)
if err != nil {
- t.Errorf("failed to exchange: %v", err)
+ t.Fatalf("failed to exchange: %v", err)
}
- if r != nil && r.Rcode != RcodeSuccess {
+ if r == nil {
+ t.Fatal("response is nil")
+ }
+ if r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get a valid answer\n%v", r)
+ }
+
+ // test tcp4-tls
+ c.Net = "tcp4-tls"
+ c.TLSConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+
+ r, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatalf("failed to exchange: %v", err)
+ }
+ if r == nil {
+ t.Fatal("response is nil")
+ }
+ if r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get a valid answer\n%v", r)
+ }
+}
+
+func TestClientTLSSyncV6(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServer)
+ defer HandleRemove("miek.nl.")
+
+ cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock)
+ if err != nil {
+ t.Fatalf("unable to build certificate: %v", err)
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ s, addrstr, err := RunLocalTLSServer("[::1]:0", &config)
+ if err != nil {
+ t.Fatalf("unable to run test server: %v", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSOA)
+
+ c := new(Client)
+
+ // test tcp-tls
+ c.Net = "tcp-tls"
+ c.TLSConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+
+ r, _, err := c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatalf("failed to exchange: %v", err)
+ }
+ if r == nil {
+ t.Fatal("response is nil")
+ }
+ if r.Rcode != RcodeSuccess {
+ t.Errorf("failed to get a valid answer\n%v", r)
+ }
+
+ // test tcp6-tls
+ c.Net = "tcp6-tls"
+ c.TLSConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+
+ r, _, err = c.Exchange(m, addrstr)
+ if err != nil {
+ t.Fatalf("failed to exchange: %v", err)
+ }
+ if r == nil {
+ t.Fatal("response is nil")
+ }
+ if r.Rcode != RcodeSuccess {
t.Errorf("failed to get an valid answer\n%v", r)
}
}
@@ -120,11 +262,11 @@ func TestClientEDNS0(t *testing.T) {
c := new(Client)
r, _, err := c.Exchange(m, addrstr)
if err != nil {
- t.Errorf("failed to exchange: %v", err)
+ t.Fatalf("failed to exchange: %v", err)
}
if r != nil && r.Rcode != RcodeSuccess {
- t.Errorf("failed to get an valid answer\n%v", r)
+ t.Errorf("failed to get a valid answer\n%v", r)
}
}
@@ -171,11 +313,14 @@ func TestClientEDNS0Local(t *testing.T) {
c := new(Client)
r, _, err := c.Exchange(m, addrstr)
if err != nil {
- t.Errorf("failed to exchange: %s", err)
+ t.Fatalf("failed to exchange: %s", err)
}
- if r != nil && r.Rcode != RcodeSuccess {
- t.Error("failed to get a valid answer")
+ if r == nil {
+ t.Fatal("response is nil")
+ }
+ if r.Rcode != RcodeSuccess {
+ t.Fatal("failed to get a valid answer")
t.Logf("%v\n", r)
}
@@ -513,6 +658,9 @@ func TestConcurrentExchanges(t *testing.T) {
for i := 0; i < len(r); i++ {
go func(i int) {
r[i], _, _ = c.Exchange(m.Copy(), addrstr)
+ if r[i] == nil {
+ t.Fatalf("response %d is nil", i)
+ }
wg.Done()
}(i)
}
diff --git a/vendor/github.com/miekg/dns/dnsutil/util.go b/vendor/github.com/miekg/dns/dnsutil/util.go
index 9ed03f296..c8c6af7b3 100644
--- a/vendor/github.com/miekg/dns/dnsutil/util.go
+++ b/vendor/github.com/miekg/dns/dnsutil/util.go
@@ -11,7 +11,7 @@ import (
"github.com/miekg/dns"
)
-// AddDomain adds origin to s if s is not already a FQDN.
+// AddOrigin adds origin to s if s is not already a FQDN.
// Note that the result may not be a FQDN. If origin does not end
// with a ".", the result won't either.
// This implements the zonefile convention (specified in RFC 1035,
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
index e38753d7d..ceabd24c1 100644
--- a/vendor/github.com/miekg/dns/doc.go
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -1,7 +1,7 @@
/*
Package dns implements a full featured interface to the Domain Name System.
Server- and client-side programming is supported.
-The package allows complete control over what is send out to the DNS. The package
+The package allows complete control over what is sent out to the DNS. The package
API follows the less-is-more principle, by presenting a small, clean interface.
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
@@ -14,7 +14,7 @@ Basic usage pattern for creating a new resource record:
r := new(dns.MX)
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
- Class: dns.ClassINET, Ttl: 3600}
+ Class: dns.ClassINET, Ttl: 3600}
r.Preference = 10
r.Mx = "mx.miek.nl."
@@ -22,16 +22,16 @@ Or directly from a string:
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
-Or when the default TTL (3600) and class (IN) suit you:
+Or when the default origin (.), TTL (3600) and class (IN) suit you:
- mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
+ mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
Or even:
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS messages are exchanged, these messages contain resource
-records (sets). Use pattern for creating a message:
+records (sets). Use pattern for creating a message:
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
@@ -51,7 +51,7 @@ The following is slightly more verbose, but more flexible:
m1.Question = make([]dns.Question, 1)
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
-After creating a message it can be send.
+After creating a message it can be sent.
Basic use pattern for synchronous querying the DNS at a
server configured on 127.0.0.1 and port 53:
@@ -63,7 +63,23 @@ class) is as easy as setting:
c.SingleInflight = true
-If these "advanced" features are not needed, a simple UDP query can be send,
+More advanced options are available using a net.Dialer and the corresponding API.
+For example, it is possible to set a timeout, or to specify a source IP address
+and port to use for the connection:
+
+ c := new(dns.Client)
+ laddr := net.UDPAddr{
+ IP: net.ParseIP("::1"),
+ Port: 12345,
+ Zone: "",
+ }
+ d := net.Dialer{
+ Timeout: 200 * time.Millisecond,
+ LocalAddr: &laddr,
+ }
+ in, rtt, err := c.ExchangeWithDialer(&d, m1, "8.8.8.8:53")
+
+If these "advanced" features are not needed, a simple UDP query can be sent,
with:
in, err := dns.Exchange(m1, "127.0.0.1:53")
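The fragment above, assembled into a self-contained sketch that uses the Client.Dialer field added in this change (as exercised in client_test.go); the local port and the 8.8.8.8 resolver come from the fragment itself:

    package main

    import (
        "fmt"
        "net"
        "time"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("miek.nl.", dns.TypeMX)

        laddr := net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 12345}
        c := new(dns.Client)
        c.Dialer = &net.Dialer{
            Timeout:   200 * time.Millisecond,
            LocalAddr: &laddr,
        }

        in, rtt, err := c.Exchange(m, "8.8.8.8:53")
        fmt.Println(in, rtt, err)
    }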
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index dbff3714c..718842f28 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -21,6 +21,7 @@ const (
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (RFC7828)
+ EDNS0PADDING = 0xc // EDNS0 padding (RFC7830)
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
@@ -74,6 +75,8 @@ func (rr *OPT) String() string {
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
case *EDNS0_LOCAL:
s += "\n; LOCAL OPT: " + o.String()
+ case *EDNS0_PADDING:
+ s += "\n; PADDING: " + o.String()
}
}
return s
@@ -595,3 +598,15 @@ func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
}
return
}
+
+// EDNS0_PADDING option is used to add padding to a request/response. The default
+// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
+// compression is applied before encryption which may break signatures.
+type EDNS0_PADDING struct {
+ Padding []byte
+}
+
+func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
+func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
+func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
+func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
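A sketch of attaching this option to an outgoing query; the 64-byte, all-zero padding is an arbitrary choice:

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.com.", dns.TypeA)
        m.SetEdns0(4096, false) // add an OPT RR first

        opt := m.IsEdns0()
        pad := &dns.EDNS0_PADDING{Padding: make([]byte, 64)} // 64 zero bytes, per the SHOULD above
        opt.Option = append(opt.Option, pad)

        fmt.Println(m.String())
    }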
diff --git a/vendor/github.com/miekg/dns/internal/socket/cmsghdr.go b/vendor/github.com/miekg/dns/internal/socket/cmsghdr.go
new file mode 100644
index 000000000..62f2d2f74
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/cmsghdr.go
@@ -0,0 +1,7 @@
+// +build linux
+
+package socket
+
+func (h *cmsghdr) len() int { return int(h.Len) }
+func (h *cmsghdr) lvl() int { return int(h.Level) }
+func (h *cmsghdr) typ() int { return int(h.Type) }
diff --git a/vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_32bit.go b/vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_32bit.go
new file mode 100644
index 000000000..e92e85800
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_32bit.go
@@ -0,0 +1,20 @@
+// +build arm mips mipsle 386
+// +build linux
+
+package socket
+
+type cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+const (
+ sizeofCmsghdr = 0xc
+)
+
+func (h *cmsghdr) set(l, lvl, typ int) {
+ h.Len = uint32(l)
+ h.Level = int32(lvl)
+ h.Type = int32(typ)
+}
diff --git a/vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_64bit.go b/vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_64bit.go
new file mode 100644
index 000000000..ddfc9e09a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/cmsghdr_linux_64bit.go
@@ -0,0 +1,20 @@
+// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x
+// +build linux
+
+package socket
+
+type cmsghdr struct {
+ Len uint64
+ Level int32
+ Type int32
+}
+
+const (
+ sizeofCmsghdr = 0x10
+)
+
+func (h *cmsghdr) set(l, lvl, typ int) {
+ h.Len = uint64(l)
+ h.Level = int32(lvl)
+ h.Type = int32(typ)
+}
diff --git a/vendor/github.com/miekg/dns/internal/socket/cmsghdr_other.go b/vendor/github.com/miekg/dns/internal/socket/cmsghdr_other.go
new file mode 100644
index 000000000..8078487c8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/cmsghdr_other.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package socket
+
+type cmsghdr struct{}
+
+const sizeofCmsghdr = 0
+
+func (h *cmsghdr) len() int { return 0 }
+func (h *cmsghdr) lvl() int { return 0 }
+func (h *cmsghdr) typ() int { return 0 }
+
+func (h *cmsghdr) set(l, lvl, typ int) {}
diff --git a/vendor/github.com/miekg/dns/internal/socket/controlmessage.go b/vendor/github.com/miekg/dns/internal/socket/controlmessage.go
new file mode 100644
index 000000000..3176e9602
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/controlmessage.go
@@ -0,0 +1,118 @@
+package socket
+
+import (
+ "errors"
+ "unsafe"
+)
+
+func controlHeaderLen() int {
+ return roundup(sizeofCmsghdr)
+}
+
+func controlMessageLen(dataLen int) int {
+ return roundup(sizeofCmsghdr) + dataLen
+}
+
+// ControlMessageSpace returns the whole length of the control message, including alignment padding.
+func ControlMessageSpace(dataLen int) int {
+ return roundup(sizeofCmsghdr) + roundup(dataLen)
+}
+
+// A ControlMessage represents the head message in a stream of control
+// messages.
+//
+// A control message comprises a header, data and a few padding
+// fields to conform to the interface to the kernel.
+//
+// See RFC 3542 for further information.
+type ControlMessage []byte
+
+// Data returns the data field of the control message at the head.
+func (m ControlMessage) Data(dataLen int) []byte {
+ l := controlHeaderLen()
+ if len(m) < l || len(m) < l+dataLen {
+ return nil
+ }
+ return m[l : l+dataLen]
+}
+
+// ParseHeader parses and returns the header fields of the control
+// message at the head.
+func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) {
+ l := controlHeaderLen()
+ if len(m) < l {
+ return 0, 0, 0, errors.New("short message")
+ }
+ h := (*cmsghdr)(unsafe.Pointer(&m[0]))
+ return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil
+}
+
+// Next returns the control message that follows the one at the head.
+func (m ControlMessage) Next(dataLen int) ControlMessage {
+ l := ControlMessageSpace(dataLen)
+ if len(m) < l {
+ return nil
+ }
+ return m[l:]
+}
+
+// MarshalHeader marshals the header fields of the control message at
+// the head.
+func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error {
+ if len(m) < controlHeaderLen() {
+ return errors.New("short message")
+ }
+ h := (*cmsghdr)(unsafe.Pointer(&m[0]))
+ h.set(controlMessageLen(dataLen), lvl, typ)
+ return nil
+}
+
+// Marshal marshals the control message at the head, and returns the next
+// control message.
+func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) {
+ l := len(data)
+ if len(m) < ControlMessageSpace(l) {
+ return nil, errors.New("short message")
+ }
+ h := (*cmsghdr)(unsafe.Pointer(&m[0]))
+ h.set(controlMessageLen(l), lvl, typ)
+ if l > 0 {
+ copy(m.Data(l), data)
+ }
+ return m.Next(l), nil
+}
+
+// Parse parses the stream as one or more control messages.
+func (m ControlMessage) Parse() ([]ControlMessage, error) {
+ var ms []ControlMessage
+ for len(m) >= controlHeaderLen() {
+ h := (*cmsghdr)(unsafe.Pointer(&m[0]))
+ l := h.len()
+ if l <= 0 {
+ return nil, errors.New("invalid header length")
+ }
+ if uint64(l) < uint64(controlHeaderLen()) {
+ return nil, errors.New("invalid message length")
+ }
+ if uint64(l) > uint64(len(m)) {
+ return nil, errors.New("short buffer")
+ }
+ ms = append(ms, ControlMessage(m[:l]))
+ ll := l - controlHeaderLen()
+ if len(m) >= ControlMessageSpace(ll) {
+ m = m[ControlMessageSpace(ll):]
+ } else {
+ m = m[controlMessageLen(ll):]
+ }
+ }
+ return ms, nil
+}
+
+// NewControlMessage returns a new stream of control messages, sized to hold payloads of the given data lengths.
+func NewControlMessage(dataLen []int) ControlMessage {
+ var l int
+ for i := range dataLen {
+ l += ControlMessageSpace(dataLen[i])
+ }
+ return make([]byte, l)
+}
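An in-package sketch of a marshal/parse round trip through this API (it only builds on Linux, where cmsghdr carries real lengths); exampleRoundTrip and the level/type values are illustrative, not part of the vendored code:

    package socket

    import "fmt"

    func exampleRoundTrip() {
        data := []byte{0xde, 0xad, 0xbe, 0xef}

        b := NewControlMessage([]int{len(data)})
        if _, err := b.Marshal(1, 2, data); err != nil { // level=1, type=2
            fmt.Println("marshal:", err)
            return
        }

        ms, err := b.Parse()
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        lvl, typ, n, err := ms[0].ParseHeader()
        if err != nil {
            fmt.Println("header:", err)
            return
        }
        fmt.Println(lvl, typ, ms[0].Data(n)) // 1 2 [222 173 190 239]
    }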
diff --git a/vendor/github.com/miekg/dns/internal/socket/controlmessage_test.go b/vendor/github.com/miekg/dns/internal/socket/controlmessage_test.go
new file mode 100644
index 000000000..e9fff4d45
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/controlmessage_test.go
@@ -0,0 +1,103 @@
+// +build linux
+
+package socket
+
+import (
+ "bytes"
+ "testing"
+)
+
+type mockControl struct {
+ Level int
+ Type int
+ Data []byte
+}
+
+func TestControlMessage(t *testing.T) {
+ for _, tt := range []struct {
+ cs []mockControl
+ }{
+ {
+ []mockControl{
+ {Level: 1, Type: 1},
+ },
+ },
+ {
+ []mockControl{
+ {Level: 2, Type: 2, Data: []byte{0xfe}},
+ },
+ },
+ {
+ []mockControl{
+ {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}},
+ },
+ },
+ {
+ []mockControl{
+ {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}},
+ },
+ },
+ {
+ []mockControl{
+ {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}},
+ {Level: 2, Type: 2, Data: []byte{0xfe}},
+ },
+ },
+ } {
+ var w []byte
+ var tailPadLen int
+ mm := NewControlMessage([]int{0})
+ for i, c := range tt.cs {
+ m := NewControlMessage([]int{len(c.Data)})
+ l := len(m) - len(mm)
+ if i == len(tt.cs)-1 && l > len(c.Data) {
+ tailPadLen = l - len(c.Data)
+ }
+ w = append(w, m...)
+ }
+
+ var err error
+ ww := make([]byte, len(w))
+ copy(ww, w)
+ m := ControlMessage(ww)
+ for _, c := range tt.cs {
+ if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil {
+ t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err)
+ }
+ copy(m.Data(len(c.Data)), c.Data)
+ m = m.Next(len(c.Data))
+ }
+ m = ControlMessage(w)
+ for _, c := range tt.cs {
+ m, err = m.Marshal(c.Level, c.Type, c.Data)
+ if err != nil {
+ t.Fatalf("(%v).Marshal() = %v", tt.cs, err)
+ }
+ }
+ if !bytes.Equal(ww, w) {
+ t.Fatalf("got %#v; want %#v", ww, w)
+ }
+
+ ws := [][]byte{w}
+ if tailPadLen > 0 {
+ // Test a message with no tail padding.
+ nopad := w[:len(w)-tailPadLen]
+ ws = append(ws, [][]byte{nopad}...)
+ }
+ for _, w := range ws {
+ ms, err := ControlMessage(w).Parse()
+ if err != nil {
+ t.Fatalf("(%v).Parse() = %v", tt.cs, err)
+ }
+ for i, m := range ms {
+ lvl, typ, dataLen, err := m.ParseHeader()
+ if err != nil {
+ t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err)
+ }
+ if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) {
+ t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data))
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/internal/socket/socket.go b/vendor/github.com/miekg/dns/internal/socket/socket.go
new file mode 100644
index 000000000..edb58e29a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/socket.go
@@ -0,0 +1,4 @@
+// Package socket contains ControlMessage parsing code from
+// golang.org/x/net/internal/socket. Instead of supporting all possible
+// architectures, we're only supporting linux 32/64 bit.
+package socket
diff --git a/vendor/github.com/miekg/dns/internal/socket/sys.go b/vendor/github.com/miekg/dns/internal/socket/sys.go
new file mode 100644
index 000000000..2f3f5cfed
--- /dev/null
+++ b/vendor/github.com/miekg/dns/internal/socket/sys.go
@@ -0,0 +1,14 @@
+package socket
+
+import "unsafe"
+
+var (
+ kernelAlign = func() int {
+ var p uintptr
+ return int(unsafe.Sizeof(p))
+ }()
+)
+
+func roundup(l int) int {
+ return (l + kernelAlign - 1) & ^(kernelAlign - 1)
+}
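For intuition: on a 64-bit platform kernelAlign is 8, so roundup pads a length up to the next multiple of 8, for example:

    roundup(1)  == 8
    roundup(13) == 16
    roundup(16) == 16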
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
index 8d415c92a..0ca12b048 100644
--- a/vendor/github.com/miekg/dns/msg_helpers.go
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -458,6 +458,13 @@ Option:
}
edns = append(edns, e)
off += int(optlen)
+ case EDNS0PADDING:
+ e := new(EDNS0_PADDING)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go
index fc5bdaf5d..c7ecb499d 100644
--- a/vendor/github.com/miekg/dns/parse_test.go
+++ b/vendor/github.com/miekg/dns/parse_test.go
@@ -8,6 +8,7 @@ import (
"math/rand"
"net"
"reflect"
+ "regexp"
"strconv"
"strings"
"testing"
@@ -571,81 +572,88 @@ test IN CNAME test.a.example.com.
t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9))
}
-func ExampleParseZone() {
- zone := `$ORIGIN .
-$TTL 3600 ; 1 hour
-name IN SOA a6.nstld.com. hostmaster.nic.name. (
- 203362132 ; serial
- 300 ; refresh (5 minutes)
- 300 ; retry (5 minutes)
- 1209600 ; expire (2 weeks)
- 300 ; minimum (5 minutes)
- )
-$TTL 10800 ; 3 hours
-name. 10800 IN NS name.
- IN NS g6.nstld.com.
- 7200 NS h6.nstld.com.
- 3600 IN NS j6.nstld.com.
- IN 3600 NS k6.nstld.com.
- NS l6.nstld.com.
- NS a6.nstld.com.
- NS c6.nstld.com.
- NS d6.nstld.com.
- NS f6.nstld.com.
- NS m6.nstld.com.
-(
- NS m7.nstld.com.
-)
-$ORIGIN name.
-0-0onlus NS ns7.ehiweb.it.
- NS ns8.ehiweb.it.
-0-g MX 10 mx01.nic
- MX 10 mx02.nic
- MX 10 mx03.nic
- MX 10 mx04.nic
-$ORIGIN 0-g.name
-moutamassey NS ns01.yahoodomains.jp.
- NS ns02.yahoodomains.jp.
+func TestOmittedTTL(t *testing.T) {
+ zone := `
+$ORIGIN example.com.
+example.com. 42 IN SOA ns1.example.com. hostmaster.example.com. 1 86400 60 86400 3600 ; TTL=42 SOA
+example.com. NS 2 ; TTL=42 absolute owner name
+@ MD 3 ; TTL=42 current-origin owner name
+ MF 4 ; TTL=42 leading-space implied owner name
+ 43 TYPE65280 \# 1 05 ; TTL=43 implied owner name explicit TTL
+ MB 6 ; TTL=43 leading-tab implied owner name
+$TTL 1337
+example.com. 88 MG 7 ; TTL=88 explicit TTL
+example.com. MR 8 ; TTL=1337 after first $TTL
+$TTL 314
+ 1 TXT 9 ; TTL=1 implied owner name explicit TTL
+example.com. DNAME 10 ; TTL=314 after second $TTL
`
- to := ParseZone(strings.NewReader(zone), "", "testzone")
- for x := range to {
- fmt.Println(x.RR)
- }
- // Output:
- // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300
- // name. 10800 IN NS name.
- // name. 10800 IN NS g6.nstld.com.
- // name. 7200 IN NS h6.nstld.com.
- // name. 3600 IN NS j6.nstld.com.
- // name. 3600 IN NS k6.nstld.com.
- // name. 10800 IN NS l6.nstld.com.
- // name. 10800 IN NS a6.nstld.com.
- // name. 10800 IN NS c6.nstld.com.
- // name. 10800 IN NS d6.nstld.com.
- // name. 10800 IN NS f6.nstld.com.
- // name. 10800 IN NS m6.nstld.com.
- // name. 10800 IN NS m7.nstld.com.
- // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it.
- // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it.
- // 0-g.name. 10800 IN MX 10 mx01.nic.name.
- // 0-g.name. 10800 IN MX 10 mx02.nic.name.
- // 0-g.name. 10800 IN MX 10 mx03.nic.name.
- // 0-g.name. 10800 IN MX 10 mx04.nic.name.
- // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp.
- // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp.
+ reCaseFromComment := regexp.MustCompile(`TTL=(\d+)\s+(.*)`)
+ records := ParseZone(strings.NewReader(zone), "", "")
+ var i int
+ for record := range records {
+ i++
+ if record.Error != nil {
+ t.Error(record.Error)
+ continue
+ }
+ expected := reCaseFromComment.FindStringSubmatch(record.Comment)
+ expectedTTL, _ := strconv.ParseUint(expected[1], 10, 32)
+ ttl := record.RR.Header().Ttl
+ if ttl != uint32(expectedTTL) {
+ t.Errorf("%s: expected TTL %d, got %d", expected[2], expectedTTL, ttl)
+ }
+ }
+ if i != 10 {
+ t.Errorf("expected %d records, got %d", 10, i)
+ }
}
-func ExampleHIP() {
- h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578
- AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p
-9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ
-b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
- rvs.example.com. )`
- if hip, err := NewRR(h); err == nil {
- fmt.Println(hip.String())
+func TestRelativeNameErrors(t *testing.T) {
+ var badZones = []struct {
+ label string
+ zoneContents string
+ expectedErr string
+ }{
+ {
+ "relative owner name without origin",
+ "example.com 3600 IN SOA ns.example.com. hostmaster.example.com. 1 86400 60 86400 3600",
+ "bad owner name",
+ },
+ {
+ "relative owner name in RDATA",
+ "example.com. 3600 IN SOA ns hostmaster 1 86400 60 86400 3600",
+ "bad SOA Ns",
+ },
+ {
+ "origin reference without origin",
+ "@ 3600 IN SOA ns.example.com. hostmaster.example.com. 1 86400 60 86400 3600",
+ "bad owner name",
+ },
+ {
+ "relative owner name in $INCLUDE",
+ "$INCLUDE file.db example.com",
+ "bad origin name",
+ },
+ {
+ "relative owner name in $ORIGIN",
+ "$ORIGIN example.com",
+ "bad origin name",
+ },
+ }
+ for _, errorCase := range badZones {
+ entries := ParseZone(strings.NewReader(errorCase.zoneContents), "", "")
+ for entry := range entries {
+ if entry.Error == nil {
+ t.Errorf("%s: expected error, got nil", errorCase.label)
+ continue
+ }
+ err := entry.Error.err
+ if err != errorCase.expectedErr {
+ t.Errorf("%s: expected error `%s`, got `%s`", errorCase.label, errorCase.expectedErr, err)
+ }
+ }
}
- // Output:
- // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com.
}
func TestHIP(t *testing.T) {
@@ -686,24 +694,6 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
}
}
-func ExampleSOA() {
- s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100"
- if soa, err := NewRR(s); err == nil {
- fmt.Println(soa.String())
- }
- // Output:
- // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100
-}
-
-func TestLineNumberError(t *testing.T) {
- s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100"
- if _, err := NewRR(s); err != nil {
- if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" {
- t.Error("not expecting this error: ", err)
- }
- }
-}
-
// Test with no known RR on the line
func TestLineNumberError2(t *testing.T) {
tests := map[string]string{
@@ -801,28 +791,6 @@ func TestLowercaseTokens(t *testing.T) {
}
}
-func ExampleParseZone_generate() {
- // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761
- zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0"
- to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "")
- for x := range to {
- if x.Error == nil {
- fmt.Println(x.RR.String())
- }
- }
- // Output:
- // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE.
- // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE.
- // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA.
- // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA.
- // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA.
- // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA.
- // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA.
- // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA.
- // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA.
- // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA.
-}
-
func TestSRVPacking(t *testing.T) {
msg := Msg{}
diff --git a/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/miekg/dns/privaterr_test.go
index 72ec8f5c0..f50d0f48c 100644
--- a/vendor/github.com/miekg/dns/privaterr_test.go
+++ b/vendor/github.com/miekg/dns/privaterr_test.go
@@ -143,7 +143,7 @@ func (rd *VERSION) Len() int {
}
var smallzone = `$ORIGIN example.org.
-@ SOA sns.dns.icann.org. noc.dns.icann.org. (
+@ 3600 IN SOA sns.dns.icann.org. noc.dns.icann.org. (
2014091518 7200 3600 1209600 3600
)
A 1.2.3.4
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index 5f7f64423..c7b1eb19a 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -105,6 +105,12 @@ type Token struct {
Comment string
}
+// ttlState describes the state necessary to fill in an omitted RR TTL
+type ttlState struct {
+ ttl uint32 // ttl is the current default TTL
+ isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
+}
+
// NewRR reads the RR contained in the string s. Only the first RR is
// returned. If s contains no RR, return nil with no error. The class
// defaults to IN and TTL defaults to 3600. The full zone file syntax
@@ -120,7 +126,8 @@ func NewRR(s string) (RR, error) {
// ReadRR reads the RR contained in q.
// See NewRR for more documentation.
func ReadRR(q io.Reader, filename string) (RR, error) {
- r := <-parseZoneHelper(q, ".", filename, 1)
+ defttl := &ttlState{defaultTtl, false}
+ r := <-parseZoneHelper(q, ".", defttl, filename, 1)
if r == nil {
return nil, nil
}
@@ -132,10 +139,10 @@ func ReadRR(q io.Reader, filename string) (RR, error) {
}
// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the
-// returned channel, which consist out the parsed RR, a potential comment or an error.
-// If there is an error the RR is nil. The string file is only used
+// returned channel, each consisting of either a parsed RR and optional comment
+// or a nil RR and an error. The string file is only used
// in error reporting. The string origin is used as the initial origin, as
-// if the file would start with: $ORIGIN origin .
+// if the file would start with an $ORIGIN directive.
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
// The channel t is closed by ParseZone when the end of r is reached.
//
@@ -157,16 +164,16 @@ func ReadRR(q io.Reader, filename string) (RR, error) {
// The text "; this is comment" is returned in Token.Comment. Comments inside the
// RR are discarded. Comments on a line by themselves are discarded too.
func ParseZone(r io.Reader, origin, file string) chan *Token {
- return parseZoneHelper(r, origin, file, 10000)
+ return parseZoneHelper(r, origin, nil, file, 10000)
}
-func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
+func parseZoneHelper(r io.Reader, origin string, defttl *ttlState, file string, chansize int) chan *Token {
t := make(chan *Token, chansize)
- go parseZone(r, origin, file, t, 0)
+ go parseZone(r, origin, defttl, file, t, 0)
return t
}
-func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
+func parseZone(r io.Reader, origin string, defttl *ttlState, f string, t chan *Token, include int) {
defer func() {
if include == 0 {
close(t)
@@ -186,18 +193,16 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.
- if origin == "" {
- origin = "."
- }
- origin = Fqdn(origin)
- if _, ok := IsDomainName(origin); !ok {
- t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
- return
+ if origin != "" {
+ origin = Fqdn(origin)
+ if _, ok := IsDomainName(origin); !ok {
+ t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
+ return
+ }
}
st := zExpectOwnerDir // initial state
var h RR_Header
- var defttl uint32 = defaultTtl
var prevName string
for l := range c {
// Lexer spotted an error already
@@ -209,27 +214,21 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
switch st {
case zExpectOwnerDir:
// We can also expect a directive, like $TTL or $ORIGIN
- h.Ttl = defttl
+ if defttl != nil {
+ h.Ttl = defttl.ttl
+ }
h.Class = ClassINET
switch l.value {
case zNewline:
st = zExpectOwnerDir
case zOwner:
h.Name = l.token
- if l.token[0] == '@' {
- h.Name = origin
- prevName = h.Name
- st = zExpectOwnerBl
- break
- }
- if h.Name[l.length-1] != '.' {
- h.Name = appendOrigin(h.Name, origin)
- }
- _, ok := IsDomainName(l.token)
+ name, ok := toAbsoluteName(l.token, origin)
if !ok {
t <- &Token{Error: &ParseError{f, "bad owner name", l}}
return
}
+ h.Name = name
prevName = h.Name
st = zExpectOwnerBl
case zDirTtl:
@@ -258,8 +257,9 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
h.Ttl = ttl
- // Don't about the defttl, we should take the $TTL value
- // defttl = ttl
+ if defttl == nil || !defttl.isByDirective {
+ defttl = &ttlState{ttl, false}
+ }
st = zExpectAnyNoTtlBl
default:
@@ -282,20 +282,12 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
case zBlank:
l := <-c
if l.value == zString {
- if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
+ name, ok := toAbsoluteName(l.token, origin)
+ if !ok {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
}
- // a new origin is specified.
- if l.token[l.length-1] != '.' {
- if origin != "." { // Prevent .. endings
- neworigin = l.token + "." + origin
- } else {
- neworigin = l.token + origin
- }
- } else {
- neworigin = l.token
- }
+ neworigin = name
}
case zNewline, zEOF:
// Ok
@@ -313,7 +305,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
return
}
- parseZone(r1, l.token, neworigin, t, include+1)
+ parseZone(r1, neworigin, defttl, l.token, t, include+1)
st = zExpectOwnerDir
case zExpectDirTtlBl:
if l.value != zBlank {
@@ -335,7 +327,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
}
- defttl = ttl
+ defttl = &ttlState{ttl, true}
st = zExpectOwnerDir
case zExpectDirOriginBl:
if l.value != zBlank {
@@ -351,19 +343,12 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
if e, _ := slurpRemainder(c, f); e != nil {
t <- &Token{Error: e}
}
- if _, ok := IsDomainName(l.token); !ok {
+ name, ok := toAbsoluteName(l.token, origin)
+ if !ok {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
}
- if l.token[l.length-1] != '.' {
- if origin != "." { // Prevent .. endings
- origin = l.token + "." + origin
- } else {
- origin = l.token + origin
- }
- } else {
- origin = l.token
- }
+ origin = name
st = zExpectOwnerDir
case zExpectDirGenerateBl:
if l.value != zBlank {
@@ -390,6 +375,10 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
case zExpectAny:
switch l.value {
case zRrtpe:
+ if defttl == nil {
+ t <- &Token{Error: &ParseError{f, "missing TTL with no previous value", l}}
+ return
+ }
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
@@ -402,7 +391,9 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
h.Ttl = ttl
- // defttl = ttl // don't set the defttl here
+ if defttl == nil || !defttl.isByDirective {
+ defttl = &ttlState{ttl, false}
+ }
st = zExpectAnyNoTtlBl
default:
t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
@@ -441,7 +432,9 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
h.Ttl = ttl
- // defttl = ttl // don't set the def ttl anymore
+ if defttl == nil || !defttl.isByDirective {
+ defttl = &ttlState{ttl, false}
+ }
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
@@ -918,6 +911,34 @@ func stringToCm(token string) (e, m uint8, ok bool) {
return
}
+func toAbsoluteName(name, origin string) (absolute string, ok bool) {
+ // check for an explicit origin reference
+ if name == "@" {
+ // require a nonempty origin
+ if origin == "" {
+ return "", false
+ }
+ return origin, true
+ }
+
+ // require a valid domain name
+ _, ok = IsDomainName(name)
+ if !ok || name == "" {
+ return "", false
+ }
+
+ // check if name is already absolute
+ if name[len(name)-1] == '.' {
+ return name, true
+ }
+
+ // require a nonempty origin
+ if origin == "" {
+ return "", false
+ }
+ return appendOrigin(name, origin), true
+}
+
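Spelled out as a few illustrative cases (these are not present in the source):

    toAbsoluteName("@", "example.org.")            == "example.org.", true     // @ expands to the origin
    toAbsoluteName("www", "example.org.")          == "www.example.org.", true // relative name + origin
    toAbsoluteName("www.example.org.", "ignored.") == "www.example.org.", true // already absolute
    toAbsoluteName("www", "")                      == "", false                // relative name, no origin
    toAbsoluteName("@", "")                        == "", false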
func appendOrigin(name, origin string) string {
if origin == "." {
return name + origin
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index b8b18fd77..824b9c949 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -130,9 +130,10 @@ func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 { // Dynamic updates.
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
rr.A = net.ParseIP(l.token)
if rr.A == nil || l.err {
return nil, &ParseError{f, "bad A A", l}, ""
@@ -145,9 +146,10 @@ func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
rr.AAAA = net.ParseIP(l.token)
if rr.AAAA == nil || l.err {
return nil, &ParseError{f, "bad AAAA AAAA", l}, ""
@@ -161,20 +163,15 @@ func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Ns = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Ns = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad NS Ns", l}, ""
}
- if rr.Ns[l.length-1] != '.' {
- rr.Ns = appendOrigin(rr.Ns, o)
- }
+ rr.Ns = name
return rr, nil, ""
}
@@ -187,17 +184,12 @@ func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- if l.token == "@" {
- rr.Ptr = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad PTR Ptr", l}, ""
}
- if rr.Ptr[l.length-1] != '.' {
- rr.Ptr = appendOrigin(rr.Ptr, o)
- }
+ rr.Ptr = name
return rr, nil, ""
}
@@ -207,20 +199,15 @@ func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string)
l := <-c
rr.Ptr = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Ptr = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, ""
}
- if rr.Ptr[l.length-1] != '.' {
- rr.Ptr = appendOrigin(rr.Ptr, o)
- }
+ rr.Ptr = name
return rr, nil, ""
}
@@ -230,34 +217,26 @@ func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Mbox = l.token
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- if l.token == "@" {
- rr.Mbox = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad RP Mbox", l}, ""
- }
- if rr.Mbox[l.length-1] != '.' {
- rr.Mbox = appendOrigin(rr.Mbox, o)
- }
+
+ mbox, mboxOk := toAbsoluteName(l.token, o)
+ if l.err || !mboxOk {
+ return nil, &ParseError{f, "bad RP Mbox", l}, ""
}
+ rr.Mbox = mbox
+
<-c // zBlank
l = <-c
rr.Txt = l.token
- if l.token == "@" {
- rr.Txt = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ txt, txtOk := toAbsoluteName(l.token, o)
+ if l.err || !txtOk {
return nil, &ParseError{f, "bad RP Txt", l}, ""
}
- if rr.Txt[l.length-1] != '.' {
- rr.Txt = appendOrigin(rr.Txt, o)
- }
+ rr.Txt = txt
+
return rr, nil, ""
}
@@ -267,20 +246,15 @@ func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Mr = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Mr = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad MR Mr", l}, ""
}
- if rr.Mr[l.length-1] != '.' {
- rr.Mr = appendOrigin(rr.Mr, o)
- }
+ rr.Mr = name
return rr, nil, ""
}
@@ -290,20 +264,15 @@ func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Mb = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Mb = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad MB Mb", l}, ""
}
- if rr.Mb[l.length-1] != '.' {
- rr.Mb = appendOrigin(rr.Mb, o)
- }
+ rr.Mb = name
return rr, nil, ""
}
@@ -313,20 +282,15 @@ func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Mg = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Mg = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad MG Mg", l}, ""
}
- if rr.Mg[l.length-1] != '.' {
- rr.Mg = appendOrigin(rr.Mg, o)
- }
+ rr.Mg = name
return rr, nil, ""
}
@@ -362,34 +326,26 @@ func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Rmail = l.token
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- if l.token == "@" {
- rr.Rmail = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad MINFO Rmail", l}, ""
- }
- if rr.Rmail[l.length-1] != '.' {
- rr.Rmail = appendOrigin(rr.Rmail, o)
- }
+
+ rmail, rmailOk := toAbsoluteName(l.token, o)
+ if l.err || !rmailOk {
+ return nil, &ParseError{f, "bad MINFO Rmail", l}, ""
}
+ rr.Rmail = rmail
+
<-c // zBlank
l = <-c
rr.Email = l.token
- if l.token == "@" {
- rr.Email = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ email, emailOk := toAbsoluteName(l.token, o)
+ if l.err || !emailOk {
return nil, &ParseError{f, "bad MINFO Email", l}, ""
}
- if rr.Email[l.length-1] != '.' {
- rr.Email = appendOrigin(rr.Email, o)
- }
+ rr.Email = email
+
return rr, nil, ""
}
@@ -399,20 +355,15 @@ func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Mf = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Mf = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad MF Mf", l}, ""
}
- if rr.Mf[l.length-1] != '.' {
- rr.Mf = appendOrigin(rr.Mf, o)
- }
+ rr.Mf = name
return rr, nil, ""
}
@@ -422,20 +373,15 @@ func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Md = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Md = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad MD Md", l}, ""
}
- if rr.Md[l.length-1] != '.' {
- rr.Md = appendOrigin(rr.Md, o)
- }
+ rr.Md = name
return rr, nil, ""
}
@@ -444,57 +390,54 @@ func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad MX Pref", l}, ""
}
rr.Preference = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Mx = l.token
- if l.token == "@" {
- rr.Mx = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad MX Mx", l}, ""
}
- if rr.Mx[l.length-1] != '.' {
- rr.Mx = appendOrigin(rr.Mx, o)
- }
+ rr.Mx = name
+
return rr, nil, ""
}
func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(RT)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil {
return nil, &ParseError{f, "bad RT Preference", l}, ""
}
rr.Preference = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Host = l.token
- if l.token == "@" {
- rr.Host = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad RT Host", l}, ""
}
- if rr.Host[l.length-1] != '.' {
- rr.Host = appendOrigin(rr.Host, o)
- }
+ rr.Host = name
+
return rr, nil, ""
}
@@ -503,28 +446,25 @@ func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad AFSDB Subtype", l}, ""
}
rr.Subtype = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Hostname = l.token
- if l.token == "@" {
- rr.Hostname = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad AFSDB Hostname", l}, ""
}
- if rr.Hostname[l.length-1] != '.' {
- rr.Hostname = appendOrigin(rr.Hostname, o)
- }
+ rr.Hostname = name
return rr, nil, ""
}
@@ -533,9 +473,10 @@ func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
if l.err {
return nil, &ParseError{f, "bad X25 PSDNAddress", l}, ""
}
@@ -548,28 +489,25 @@ func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad KX Pref", l}, ""
}
rr.Preference = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Exchanger = l.token
- if l.token == "@" {
- rr.Exchanger = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad KX Exchanger", l}, ""
}
- if rr.Exchanger[l.length-1] != '.' {
- rr.Exchanger = appendOrigin(rr.Exchanger, o)
- }
+ rr.Exchanger = name
return rr, nil, ""
}
@@ -579,20 +517,15 @@ func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Target = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Target = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad CNAME Target", l}, ""
}
- if rr.Target[l.length-1] != '.' {
- rr.Target = appendOrigin(rr.Target, o)
- }
+ rr.Target = name
return rr, nil, ""
}
@@ -602,20 +535,15 @@ func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Target = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Target = o
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad CNAME Target", l}, ""
- }
- if rr.Target[l.length-1] != '.' {
- rr.Target = appendOrigin(rr.Target, o)
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return nil, &ParseError{f, "bad DNAME Target", l}, ""
}
+ rr.Target = name
return rr, nil, ""
}
@@ -625,35 +553,26 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.Ns = l.token
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- <-c // zBlank
- if l.token == "@" {
- rr.Ns = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad SOA Ns", l}, ""
- }
- if rr.Ns[l.length-1] != '.' {
- rr.Ns = appendOrigin(rr.Ns, o)
- }
+
+ ns, nsOk := toAbsoluteName(l.token, o)
+ if l.err || !nsOk {
+ return nil, &ParseError{f, "bad SOA Ns", l}, ""
}
+ rr.Ns = ns
+ <-c // zBlank
l = <-c
rr.Mbox = l.token
- if l.token == "@" {
- rr.Mbox = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad SOA Mbox", l}, ""
- }
- if rr.Mbox[l.length-1] != '.' {
- rr.Mbox = appendOrigin(rr.Mbox, o)
- }
+
+ mbox, mboxOk := toAbsoluteName(l.token, o)
+ if l.err || !mboxOk {
+ return nil, &ParseError{f, "bad SOA Mbox", l}, ""
}
+ rr.Mbox = mbox
+
<-c // zBlank
var (
@@ -667,9 +586,10 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
}
if j, e := strconv.ParseUint(l.token, 10, 32); e != nil {
if i == 0 {
- // Serial should be a number
+ // Serial must be a number
return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
}
+ // We allow other fields to be unitful duration strings
if v, ok = stringToTtl(l.token); !ok {
return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
@@ -702,14 +622,16 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad SRV Priority", l}, ""
}
rr.Priority = uint16(i)
+
<-c // zBlank
l = <-c // zString
i, e = strconv.ParseUint(l.token, 10, 16)
@@ -717,6 +639,7 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad SRV Weight", l}, ""
}
rr.Weight = uint16(i)
+
<-c // zBlank
l = <-c // zString
i, e = strconv.ParseUint(l.token, 10, 16)
@@ -724,20 +647,16 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad SRV Port", l}, ""
}
rr.Port = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Target = l.token
- if l.token == "@" {
- rr.Target = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad SRV Target", l}, ""
}
- if rr.Target[l.length-1] != '.' {
- rr.Target = appendOrigin(rr.Target, o)
- }
+ rr.Target = name
return rr, nil, ""
}
@@ -746,14 +665,16 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NAPTR Order", l}, ""
}
rr.Order = uint16(i)
+
<-c // zBlank
l = <-c // zString
i, e = strconv.ParseUint(l.token, 10, 16)
@@ -761,6 +682,7 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad NAPTR Preference", l}, ""
}
rr.Preference = uint16(i)
+
// Flags
<-c // zBlank
l = <-c // _QUOTE
@@ -817,21 +739,17 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
} else {
return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
}
+
// After quote no space??
<-c // zBlank
l = <-c // zString
rr.Replacement = l.token
- if l.token == "@" {
- rr.Replacement = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad NAPTR Replacement", l}, ""
}
- if rr.Replacement[l.length-1] != '.' {
- rr.Replacement = appendOrigin(rr.Replacement, o)
- }
+ rr.Replacement = name
return rr, nil, ""
}
@@ -841,34 +759,26 @@ func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.PreviousName = l.token
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
- if l.token == "@" {
- rr.PreviousName = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad TALINK PreviousName", l}, ""
- }
- if rr.PreviousName[l.length-1] != '.' {
- rr.PreviousName = appendOrigin(rr.PreviousName, o)
- }
+
+ previousName, previousNameOk := toAbsoluteName(l.token, o)
+ if l.err || !previousNameOk {
+ return nil, &ParseError{f, "bad TALINK PreviousName", l}, ""
}
+ rr.PreviousName = previousName
+
<-c // zBlank
l = <-c
rr.NextName = l.token
- if l.token == "@" {
- rr.NextName = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+
+ nextName, nextNameOk := toAbsoluteName(l.token, o)
+ if l.err || !nextNameOk {
return nil, &ParseError{f, "bad TALINK NextName", l}, ""
}
- if rr.NextName[l.length-1] != '.' {
- rr.NextName = appendOrigin(rr.NextName, o)
- }
+ rr.NextName = nextName
+
return rr, nil, ""
}
@@ -880,9 +790,10 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.VertPre = 162 // 10
rr.Size = 18 // 1
ok := false
+
// North
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
i, e := strconv.ParseUint(l.token, 10, 32)
@@ -1013,14 +924,16 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
// HitLength is not represented
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, ""
}
rr.PublicKeyAlgorithm = uint8(i)
+
<-c // zBlank
l = <-c // zString
if l.length == 0 || l.err {
@@ -1043,19 +956,11 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zString:
- if l.token == "@" {
- xs = append(xs, o)
- l = <-c
- continue
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
}
- if l.token[l.length-1] != '.' {
- l.token = appendOrigin(l.token, o)
- }
- xs = append(xs, l.token)
+ xs = append(xs, name)
case zBlank:
// Ok
default:
@@ -1072,9 +977,10 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
if v, ok := StringToCertType[l.token]; ok {
rr.Type = v
} else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil {
@@ -1129,10 +1035,12 @@ func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(RRSIG)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
if t, ok := StringToType[l.tokenUpper]; !ok {
if strings.HasPrefix(l.tokenUpper, "TYPE") {
t, ok = typeToInt(l.tokenUpper)
@@ -1146,6 +1054,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
} else {
rr.TypeCovered = t
}
+
<-c // zBlank
l = <-c
i, err := strconv.ParseUint(l.token, 10, 8)
@@ -1153,6 +1062,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad RRSIG Algorithm", l}, ""
}
rr.Algorithm = uint8(i)
+
<-c // zBlank
l = <-c
i, err = strconv.ParseUint(l.token, 10, 8)
@@ -1160,6 +1070,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad RRSIG Labels", l}, ""
}
rr.Labels = uint8(i)
+
<-c // zBlank
l = <-c
i, err = strconv.ParseUint(l.token, 10, 32)
@@ -1167,6 +1078,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, ""
}
rr.OrigTtl = uint32(i)
+
<-c // zBlank
l = <-c
if i, err := StringToTime(l.token); err != nil {
@@ -1180,6 +1092,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
} else {
rr.Expiration = i
}
+
<-c // zBlank
l = <-c
if i, err := StringToTime(l.token); err != nil {
@@ -1191,6 +1104,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
} else {
rr.Inception = i
}
+
<-c // zBlank
l = <-c
i, err = strconv.ParseUint(l.token, 10, 16)
@@ -1198,25 +1112,22 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad RRSIG KeyTag", l}, ""
}
rr.KeyTag = uint16(i)
+
<-c // zBlank
l = <-c
rr.SignerName = l.token
- if l.token == "@" {
- rr.SignerName = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad RRSIG SignerName", l}, ""
- }
- if rr.SignerName[l.length-1] != '.' {
- rr.SignerName = appendOrigin(rr.SignerName, o)
- }
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return nil, &ParseError{f, "bad RRSIG SignerName", l}, ""
}
+ rr.SignerName = name
+
s, e, c1 := endingToString(c, "bad RRSIG Signature", f)
if e != nil {
return nil, e, c1
}
rr.Signature = s
+
return rr, nil, c1
}
@@ -1226,20 +1137,15 @@ func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l := <-c
rr.NextDomain = l.token
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
- if l.token == "@" {
- rr.NextDomain = o
- } else {
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
- return nil, &ParseError{f, "bad NSEC NextDomain", l}, ""
- }
- if rr.NextDomain[l.length-1] != '.' {
- rr.NextDomain = appendOrigin(rr.NextDomain, o)
- }
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return nil, &ParseError{f, "bad NSEC NextDomain", l}, ""
}
+ rr.NextDomain = name
rr.TypeBitMap = make([]uint16, 0)
var (
@@ -1271,9 +1177,10 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3 Hash", l}, ""
@@ -1339,9 +1246,10 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, ""
@@ -1373,9 +1281,10 @@ func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
if l.length != 17 || l.err {
return nil, &ParseError{f, "bad EUI48 Address", l}, ""
}
@@ -1405,9 +1314,10 @@ func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
if l.length != 23 || l.err {
return nil, &ParseError{f, "bad EUI64 Address", l}, ""
}
@@ -1437,9 +1347,10 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SSHFP Algorithm", l}, ""
@@ -1466,9 +1377,10 @@ func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, str
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " Flags", l}, ""
@@ -1522,9 +1434,10 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad RKEY Flags", l}, ""
@@ -1577,10 +1490,12 @@ func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(GPOS)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
_, e := strconv.ParseFloat(l.token, 64)
if e != nil || l.err {
return nil, &ParseError{f, "bad GPOS Longitude", l}, ""
@@ -1606,10 +1521,12 @@ func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
rr := new(DS)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, ""
@@ -1665,10 +1582,12 @@ func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(TA)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad TA KeyTag", l}, ""
@@ -1703,10 +1622,12 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(TLSA)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad TLSA Usage", l}, ""
@@ -1738,10 +1659,12 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(SMIMEA)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA Usage", l}, ""
@@ -1773,10 +1696,12 @@ func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(RFC3597)
rr.Hdr = h
+
l := <-c
if l.token != "\\#" {
return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
}
+
<-c // zBlank
l = <-c
rdlength, e := strconv.Atoi(l.token)
@@ -1850,7 +1775,7 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 { // Dynamic updates.
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
@@ -1897,9 +1822,10 @@ func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NID Preference", l}, ""
@@ -1920,9 +1846,10 @@ func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad L32 Preference", l}, ""
@@ -1942,31 +1869,25 @@ func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad LP Preference", l}, ""
}
rr.Preference = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Fqdn = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Fqdn = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
return nil, &ParseError{f, "bad LP Fqdn", l}, ""
}
- if rr.Fqdn[l.length-1] != '.' {
- rr.Fqdn = appendOrigin(rr.Fqdn, o)
- }
+ rr.Fqdn = name
+
return rr, nil, ""
}
@@ -1975,9 +1896,10 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad L64 Preference", l}, ""
@@ -1996,10 +1918,12 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(UID)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return nil, &ParseError{f, "bad UID Uid", l}, ""
@@ -2011,10 +1935,12 @@ func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(GID)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return nil, &ParseError{f, "bad GID Gid", l}, ""
@@ -2026,6 +1952,7 @@ func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(UINFO)
rr.Hdr = h
+
s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f)
if e != nil {
return nil, e, c1
@@ -2042,55 +1969,46 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, ""
}
+
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad PX Preference", l}, ""
}
rr.Preference = uint16(i)
+
<-c // zBlank
l = <-c // zString
rr.Map822 = l.token
- if l.length == 0 {
- return rr, nil, ""
- }
- if l.token == "@" {
- rr.Map822 = o
- return rr, nil, ""
- }
- _, ok := IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+ map822, map822Ok := toAbsoluteName(l.token, o)
+ if l.err || !map822Ok {
return nil, &ParseError{f, "bad PX Map822", l}, ""
}
- if rr.Map822[l.length-1] != '.' {
- rr.Map822 = appendOrigin(rr.Map822, o)
- }
+ rr.Map822 = map822
+
<-c // zBlank
l = <-c // zString
rr.Mapx400 = l.token
- if l.token == "@" {
- rr.Mapx400 = o
- return rr, nil, ""
- }
- _, ok = IsDomainName(l.token)
- if !ok || l.length == 0 || l.err {
+ mapx400, mapx400Ok := toAbsoluteName(l.token, o)
+ if l.err || !mapx400Ok {
return nil, &ParseError{f, "bad PX Mapx400", l}, ""
}
- if rr.Mapx400[l.length-1] != '.' {
- rr.Mapx400 = appendOrigin(rr.Mapx400, o)
- }
+ rr.Mapx400 = mapx400
+
return rr, nil, ""
}
func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(CAA)
rr.Hdr = h
+
l := <-c
- if l.length == 0 {
+ if l.length == 0 { // dynamic update rr.
return rr, nil, l.comment
}
+
i, err := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err {
return nil, &ParseError{f, "bad CAA Flag", l}, ""
diff --git a/vendor/github.com/miekg/dns/scan_test.go b/vendor/github.com/miekg/dns/scan_test.go
index b31c4c779..e43ad4478 100644
--- a/vendor/github.com/miekg/dns/scan_test.go
+++ b/vendor/github.com/miekg/dns/scan_test.go
@@ -21,13 +21,16 @@ func TestParseZoneInclude(t *testing.T) {
t.Fatalf("could not close tmpfile %q: %s", tmpfile.Name(), err)
}
- zone := "$INCLUDE " + tmpfile.Name()
+ zone := "$ORIGIN example.org.\n$INCLUDE " + tmpfile.Name()
tok := ParseZone(strings.NewReader(zone), "", "")
for x := range tok {
if x.Error != nil {
t.Fatalf("expected no error, but got %s", x.Error)
}
+ if x.RR.Header().Name != "foo.example.org." {
+ t.Fatalf("expected %s, but got %s", "foo.example.org.", x.RR.Header().Name)
+ }
}
os.Remove(tmpfile.Name())
diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go
index f17a2f90f..b74f2f1a8 100644
--- a/vendor/github.com/miekg/dns/server_test.go
+++ b/vendor/github.com/miekg/dns/server_test.go
@@ -30,6 +30,16 @@ func HelloServerBadID(w ResponseWriter, req *Msg) {
w.WriteMsg(m)
}
+func HelloServerEchoAddrPort(w ResponseWriter, req *Msg) {
+ m := new(Msg)
+ m.SetReply(req)
+
+ remoteAddr := w.RemoteAddr().String()
+ m.Extra = make([]RR, 1)
+ m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{remoteAddr}}
+ w.WriteMsg(m)
+}
+
func AnotherHelloServer(w ResponseWriter, req *Msg) {
m := new(Msg)
m.SetReply(req)
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
index af111b9a8..12a209678 100644
--- a/vendor/github.com/miekg/dns/udp.go
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -27,8 +27,19 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
return n, &SessionUDP{raddr, oob[:oobn]}, err
}
-// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
- n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
+ oob := correctSource(session.context)
+ n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
return n, err
}
+
+// correctSource takes oob data and returns new oob data with the Src equal to the Dst
+func correctSource(oob []byte) []byte {
+ dst, err := parseUDPSocketDst(oob)
+ // If the destination could not be determined, ignore.
+ if err != nil || dst == nil {
+ return nil
+ }
+ return marshalUDPSocketSrc(dst)
+}
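For context, here is a minimal sketch of how the session helpers above are typically used together, assuming a plain *net.UDPConn; the port, buffer size, and error handling are illustrative and not part of this change:

```go
// Sketch: echo each UDP payload back to its sender. WriteToSessionUDP
// derives the reply's source address from the oob data captured by
// ReadFromSessionUDP, which is exactly what correctSource enables.
package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	addr, err := net.ResolveUDPAddr("udp", ":8053") // illustrative port
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	buf := make([]byte, 65535)
	for {
		n, session, err := dns.ReadFromSessionUDP(conn, buf)
		if err != nil {
			log.Println("read:", err)
			continue
		}
		// Reply through the session so the source address matches the
		// address the packet was originally sent to.
		if _, err := dns.WriteToSessionUDP(conn, buf[:n], session); err != nil {
			log.Println("write:", err)
		}
	}
}
```

The practical effect of correctSource is that replies from a multi-homed host go out with the same source address the query arrived on, rather than whichever address the kernel would otherwise pick.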
diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go
index 033df4239..13747ed34 100644
--- a/vendor/github.com/miekg/dns/udp_linux.go
+++ b/vendor/github.com/miekg/dns/udp_linux.go
@@ -13,8 +13,34 @@ package dns
import (
"net"
"syscall"
+ "unsafe"
+
+ "github.com/miekg/dns/internal/socket"
+)
+
+const (
+ sizeofInet6Pktinfo = 0x14
+ sizeofInetPktinfo = 0xc
+ protocolIP = 0
+ protocolIPv6 = 41
)
+type inetPktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex int32
+}
+
+type inetControlMessage struct {
+ Src net.IP // source address, specifying only
+ Dst net.IP // destination address, receiving only
+}
+
// setUDPSocketOptions sets the UDP socket options.
// This function is implemented on a per platform basis. See udp_*.go for more details
func setUDPSocketOptions(conn *net.UDPConn) error {
@@ -103,3 +129,92 @@ func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
defer file.Close()
return syscall.Getsockname(int(file.Fd()))
}
+
+// marshalInetPacketInfo marshals an IPv4 control message, returning
+// the byte slice for the next marshal, if any
+func marshalInetPacketInfo(b []byte, cm *inetControlMessage) []byte {
+ m := socket.ControlMessage(b)
+ m.MarshalHeader(protocolIP, syscall.IP_PKTINFO, sizeofInetPktinfo)
+ if cm != nil {
+ pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0]))
+ if ip := cm.Src.To4(); ip != nil {
+ copy(pi.Spec_dst[:], ip)
+ }
+ }
+ return m.Next(sizeofInetPktinfo)
+}
+
+// marshalInet6PacketInfo marshals an IPv6 control message, returning
+// the byte slice for the next marshal, if any
+func marshalInet6PacketInfo(b []byte, cm *inetControlMessage) []byte {
+ m := socket.ControlMessage(b)
+ m.MarshalHeader(protocolIPv6, syscall.IPV6_PKTINFO, sizeofInet6Pktinfo)
+ if cm != nil {
+ pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0]))
+ if ip := cm.Src.To16(); ip != nil && ip.To4() == nil {
+ copy(pi.Addr[:], ip)
+ }
+ }
+ return m.Next(sizeofInet6Pktinfo)
+}
+
+func parseInetPacketInfo(cm *inetControlMessage, b []byte) {
+ pi := (*inetPktinfo)(unsafe.Pointer(&b[0]))
+ if len(cm.Dst) < net.IPv4len {
+ cm.Dst = make(net.IP, net.IPv4len)
+ }
+ copy(cm.Dst, pi.Addr[:])
+}
+
+func parseInet6PacketInfo(cm *inetControlMessage, b []byte) {
+ pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0]))
+ if len(cm.Dst) < net.IPv6len {
+ cm.Dst = make(net.IP, net.IPv6len)
+ }
+ copy(cm.Dst, pi.Addr[:])
+}
+
+// parseUDPSocketDst takes out-of-band data from ReadMsgUDP and parses it for
+// the Dst address
+func parseUDPSocketDst(oob []byte) (net.IP, error) {
+ cm := new(inetControlMessage)
+ ms, err := socket.ControlMessage(oob).Parse()
+ if err != nil {
+ return nil, err
+ }
+ for _, m := range ms {
+ lvl, typ, l, err := m.ParseHeader()
+ if err != nil {
+ return nil, err
+ }
+ if lvl == protocolIPv6 { // IPv6
+ if typ == syscall.IPV6_PKTINFO && l >= sizeofInet6Pktinfo {
+ parseInet6PacketInfo(cm, m.Data(l))
+ }
+ } else if lvl == protocolIP { // IPv4
+ if typ == syscall.IP_PKTINFO && l >= sizeofInetPktinfo {
+ parseInetPacketInfo(cm, m.Data(l))
+ }
+ }
+ }
+ return cm.Dst, nil
+}
+
+// marshalUDPSocketSrc takes the given src address and returns out-of-band data
+// to give to WriteMsgUDP
+func marshalUDPSocketSrc(src net.IP) []byte {
+ var oob []byte
+	// If the dst is definitely an IPv6 address, use the IPv6 control message to respond;
+	// otherwise use IPv4, because the IPv6 marshal ignores IPv4 addresses.
+	// See marshalInet6PacketInfo
+ cm := new(inetControlMessage)
+ cm.Src = src
+ if src.To4() == nil {
+ oob = make([]byte, socket.ControlMessageSpace(sizeofInet6Pktinfo))
+ marshalInet6PacketInfo(oob, cm)
+ } else {
+ oob = make([]byte, socket.ControlMessageSpace(sizeofInetPktinfo))
+ marshalInetPacketInfo(oob, cm)
+ }
+ return oob
+}
diff --git a/vendor/github.com/miekg/dns/udp_linux_test.go b/vendor/github.com/miekg/dns/udp_linux_test.go
new file mode 100644
index 000000000..14a8acefd
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_linux_test.go
@@ -0,0 +1,68 @@
+// +build linux,!appengine
+
+package dns
+
+import (
+ "bytes"
+ "net"
+ "testing"
+)
+
+func TestParseUDPSocketDst(t *testing.T) {
+	// dst is ::ffff:100.100.100.100 (an IPv4-mapped IPv6 address)
+ oob := []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 100, 100, 100, 2, 0, 0, 0}
+ dst, err := parseUDPSocketDst(oob)
+ if err != nil {
+ t.Fatalf("error parsing ipv6 oob: %v", err)
+ }
+ dst4 := dst.To4()
+ if dst4 == nil {
+ t.Errorf("failed to parse ipv4: %v", dst)
+ } else if dst4.String() != "100.100.100.100" {
+ t.Errorf("unexpected ipv4: %v", dst4)
+ }
+
+ // dst is 2001:db8::1
+ oob = []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0}
+ dst, err = parseUDPSocketDst(oob)
+ if err != nil {
+ t.Fatalf("error parsing ipv6 oob: %v", err)
+ }
+ dst6 := dst.To16()
+ if dst6 == nil {
+ t.Errorf("failed to parse ipv6: %v", dst)
+ } else if dst6.String() != "2001:db8::1" {
+		t.Errorf("unexpected ipv6: %v", dst6)
+ }
+
+ // dst is 100.100.100.100 but was received on 10.10.10.10
+ oob = []byte{28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 10, 10, 10, 10, 100, 100, 100, 100, 0, 0, 0, 0}
+ dst, err = parseUDPSocketDst(oob)
+ if err != nil {
+ t.Fatalf("error parsing ipv4 oob: %v", err)
+ }
+ dst4 = dst.To4()
+ if dst4 == nil {
+ t.Errorf("failed to parse ipv4: %v", dst)
+ } else if dst4.String() != "100.100.100.100" {
+ t.Errorf("unexpected ipv4: %v", dst4)
+ }
+}
+
+func TestMarshalUDPSocketSrc(t *testing.T) {
+ // src is 100.100.100.100
+ exoob := []byte{28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 100, 100, 100, 100, 0, 0, 0, 0, 0, 0, 0, 0}
+ oob := marshalUDPSocketSrc(net.ParseIP("100.100.100.100"))
+ if !bytes.Equal(exoob, oob) {
+ t.Errorf("expected ipv4 oob:\n%v", exoob)
+ t.Errorf("actual ipv4 oob:\n%v", oob)
+ }
+
+ // src is 2001:db8::1
+ exoob = []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}
+ oob = marshalUDPSocketSrc(net.ParseIP("2001:db8::1"))
+ if !bytes.Equal(exoob, oob) {
+ t.Errorf("expected ipv6 oob:\n%v", exoob)
+ t.Errorf("actual ipv6 oob:\n%v", oob)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go
index 488a282b2..531f4ebcc 100644
--- a/vendor/github.com/miekg/dns/udp_other.go
+++ b/vendor/github.com/miekg/dns/udp_other.go
@@ -9,7 +9,9 @@ import (
// These do nothing. See udp_linux.go for an example of how to implement this.
// We tried to adhere to some kind of naming scheme.
-func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
-func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
-func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
-func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
+func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
+func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
+func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
+func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
+func parseUDPSocketDst(oob []byte) (net.IP, error) { return nil, nil }
+func marshalUDPSocketSrc(src net.IP) []byte { return nil }
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
index 51e532ac2..2ad4ede7f 100644
--- a/vendor/github.com/miekg/dns/udp_windows.go
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -4,10 +4,12 @@ package dns
import "net"
+// SessionUDP holds the remote address
type SessionUDP struct {
raddr *net.UDPAddr
}
+// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
@@ -21,9 +23,8 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
return n, session, err
}
-// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, err := conn.WriteTo(b, session.raddr)
return n, err
}
-
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
index 9805f7ec8..3d260fa61 100644
--- a/vendor/github.com/minio/minio-go/.travis.yml
+++ b/vendor/github.com/minio/minio-go/.travis.yml
@@ -18,6 +18,7 @@ script:
- diff -au <(gofmt -d .) <(printf "")
- go get -u github.com/cheggaaa/pb/...
- go get -u github.com/sirupsen/logrus/...
+- go get -u github.com/dustin/go-humanize/...
- go vet ./...
- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
index 6dbef6265..e2a957137 100644
--- a/vendor/github.com/minio/minio-go/MAINTAINERS.md
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -5,7 +5,6 @@
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
-
Edit `libraryVersion` constant in `api.go`.
```
@@ -13,7 +12,23 @@ $ grep libraryVersion api.go
libraryVersion = "0.3.0"
```
+Commit your changes
+```
+$ git commit -a -m "Bump to new release 0.3.0" --author "Minio Trusted <trusted@minio.io>"
+```
+
+Tag and sign your release commit. Note that this step requires access to Minio's trusted private key.
```
-$ git tag 0.3.0
+$ export GNUPGHOME=/path/to/trusted/key
+$ git tag -s 0.3.0
+$ git push
$ git push --tags
-``` \ No newline at end of file
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@minio.io` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of the salient features in this release, and Changelog contains a list of all commits since the last release.
+
+To generate the `changelog`:
+```sh
+git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
+```
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index 8f3c69668..5eb6656d5 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -130,7 +130,6 @@ The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
-
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
@@ -140,25 +139,21 @@ The full API Reference is available here.
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
-
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
### API Reference : Bucket notification Operations
-
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
### API Reference : File Object Operations
-
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
### API Reference : Object Operations
-
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
@@ -169,14 +164,13 @@ The full API Reference is available here.
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference: Encrypted Object Operations
-
* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
### API Reference : Presigned Operations
-
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
@@ -185,11 +179,9 @@ The full API Reference is available here.
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
-
## Full Examples
-#### Full Examples : Bucket Operations
-
+### Full Examples : Bucket Operations
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
@@ -198,26 +190,22 @@ The full API Reference is available here.
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
-#### Full Examples : Bucket policy Operations
-
+### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
-#### Full Examples : Bucket notification Operations
-
+### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
-#### Full Examples : File Object Operations
-
+### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
-#### Full Examples : Object Operations
-
+### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
@@ -226,14 +214,14 @@ The full API Reference is available here.
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
-#### Full Examples : Encrypted Object Operations
-
+### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
-#### Full Examples : Presigned Operations
+### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
@@ -242,7 +230,6 @@ The full API Reference is available here.
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
## Contribute
-
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index 8cfcb55fb..c645828df 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -84,19 +84,35 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
}
// PresignedGetObject - Returns a presigned URL to access an object
-// without credentials. Expires maximum is 7days - ie. 604800 and
-// minimum is 1. Additionally you can override a set of response
-// headers using the query parameters.
+// data without credentials. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}
-// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
-// Expires maximum is 7days - ie. 604800 and minimum is 1.
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. The URL can have a maximum expiry of up to
+// 7 days or a minimum of 1 second.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
+// Presign - returns a presigned URL for any HTTP method of your choice,
+// along with custom request params. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second.
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
+
// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
// Validate input arguments.
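For illustration, a minimal sketch of calling the new generic Presign method; the endpoint, credentials, bucket, and object name are placeholders, and error handling is abbreviated:

```go
// Sketch: presign an arbitrary HTTP method ("GET" here) with custom
// request parameters using the Presign method added above.
package main

import (
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	reqParams := make(url.Values)
	reqParams.Set("response-content-type", "application/octet-stream")

	// Expiry must be between 1 second and 7 days.
	presignedURL, err := client.Presign("GET", "my-bucketname", "my-objectname", time.Hour, reqParams)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(presignedURL)
}
```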
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index 6e0015acc..aefeb5f26 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -24,10 +24,10 @@ import (
"io/ioutil"
"net/http"
"net/url"
+ "runtime/debug"
"sort"
"strconv"
"strings"
- "sync"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -51,16 +51,6 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
return n, err
}
-// Pool to manage re-usable memory for upload objects
-// with streams with unknown size.
-var bufPool = sync.Pool{
- New: func() interface{} {
- _, partSize, _, _ := optimalPartInfo(-1)
- b := make([]byte, partSize)
- return &b
- },
-}
-
func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -78,7 +68,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
- totalPartsCount, _, _, err := optimalPartInfo(-1)
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
return 0, err
}
@@ -101,38 +91,39 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
+
for partNumber <= totalPartsCount {
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials()
- bufp := bufPool.Get().(*[]byte)
- length, rErr := io.ReadFull(reader, *bufp)
+ length, rErr := io.ReadFull(reader, buf)
if rErr == io.EOF {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF {
- bufPool.Put(bufp)
return 0, rErr
}
// Calculates hash sums while copying partSize bytes into cw.
for k, v := range hashAlgos {
- v.Write((*bufp)[:length])
+ v.Write(buf[:length])
hashSums[k] = v.Sum(nil)
}
// Update progress reader appropriately to the latest offset
// as we read from the source.
- rd := newHook(bytes.NewReader((*bufp)[:length]), progress)
+ rd := newHook(bytes.NewReader(buf[:length]), progress)
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
hashSums["md5"], hashSums["sha256"], int64(length), metadata)
if err != nil {
- bufPool.Put(bufp)
return totalUploadedSize, err
}
@@ -145,9 +136,6 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Increment part number.
partNumber++
- // Put back data into bufpool.
- bufPool.Put(bufp)
-
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if rErr == io.EOF {
diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
index 0d4639e83..40cd5c252 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
@@ -153,7 +153,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= totalWorkers; w++ {
- go func() {
+ go func(partSize int64) {
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
@@ -197,7 +197,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
Error: nil,
}
}
- }()
+ }(partSize)
}
// Gather the responses as they occur and update any
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index f4107132e..94db82593 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -23,6 +23,7 @@ import (
"os"
"reflect"
"runtime"
+ "runtime/debug"
"sort"
"strings"
@@ -233,7 +234,7 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
- totalPartsCount, _, _, err := optimalPartInfo(-1)
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
return 0, err
}
@@ -256,27 +257,28 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
+
for partNumber <= totalPartsCount {
- bufp := bufPool.Get().(*[]byte)
- length, rErr := io.ReadFull(reader, *bufp)
+ length, rErr := io.ReadFull(reader, buf)
if rErr == io.EOF {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF {
- bufPool.Put(bufp)
return 0, rErr
}
// Update progress reader appropriately to the latest offset
// as we read from the source.
- rd := newHook(bytes.NewReader((*bufp)[:length]), progress)
+ rd := newHook(bytes.NewReader(buf[:length]), progress)
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
nil, nil, int64(length), metadata)
if err != nil {
- bufPool.Put(bufp)
return totalUploadedSize, err
}
@@ -289,9 +291,6 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
// Increment part number.
partNumber++
- // Put back data into bufpool.
- bufPool.Put(bufp)
-
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if rErr == io.EOF {
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index 811f89a3f..946a58869 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -87,7 +87,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "3.0.0"
+ libraryVersion = "3.0.3"
)
// User Agent should always follow the below style.
@@ -190,6 +190,31 @@ func redirectHeaders(req *http.Request, via []*http.Request) error {
return nil
}
+// getRegionFromURL - parse region from URL if present.
+func getRegionFromURL(u url.URL) (region string) {
+ region = ""
+ if s3utils.IsGoogleEndpoint(u) {
+ return
+ } else if s3utils.IsAmazonChinaEndpoint(u) {
+ // For china specifically we need to set everything to
+ // cn-north-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // China region.
+ return "cn-north-1"
+ } else if s3utils.IsAmazonGovCloudEndpoint(u) {
+ // For us-gov specifically we need to set everything to
+ // us-gov-west-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // Gov cloud region.
+ return "us-gov-west-1"
+ }
+ parts := s3utils.AmazonS3Host.FindStringSubmatch(u.Host)
+ if len(parts) > 1 {
+ region = parts[1]
+ }
+ return region
+}
+
func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
@@ -216,6 +241,9 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
}
// Sets custom region, if region is empty bucket location cache is used automatically.
+ if region == "" {
+ region = getRegionFromURL(clnt.endpointURL)
+ }
clnt.region = region
// Instantiate bucket location cache.
@@ -494,7 +522,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Blank identifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
- for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
+ for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
@@ -562,9 +590,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Additionally we should only retry if bucketLocation and custom
// region is empty.
if metadata.bucketLocation == "" && c.region == "" {
- if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
- c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
- continue // Retry.
+ if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
+ if metadata.bucketName != "" && errResponse.Region != "" {
+ // Gather Cached location only if bucketName is present.
+ if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+ }
}
}
diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go
index 2a9db3cb6..f15a6eed3 100644
--- a/vendor/github.com/minio/minio-go/api_unit_test.go
+++ b/vendor/github.com/minio/minio-go/api_unit_test.go
@@ -164,6 +164,50 @@ func TestGetReaderSize(t *testing.T) {
}
}
+// Tests get region from host URL.
+func TestGetRegionFromURL(t *testing.T) {
+ testCases := []struct {
+ u url.URL
+ expectedRegion string
+ }{
+ {
+ u: url.URL{Host: "storage.googleapis.com"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3.cn-north-1.amazonaws.com.cn"},
+ expectedRegion: "cn-north-1",
+ },
+ {
+ u: url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+ expectedRegion: "us-gov-west-1",
+ },
+ {
+ u: url.URL{Host: "s3-us-gov-west-1.amazonaws.com"},
+ expectedRegion: "us-gov-west-1",
+ },
+ {
+ u: url.URL{Host: "192.168.1.1"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3-eu-west-1.amazonaws.com"},
+ expectedRegion: "eu-west-1",
+ },
+ {
+ u: url.URL{Host: "s3.amazonaws.com"},
+ expectedRegion: "",
+ },
+ }
+
+ for i, testCase := range testCases {
+ region := getRegionFromURL(testCase.u)
+ if testCase.expectedRegion != region {
+ t.Errorf("Test %d: Expected region %s, got %s", i+1, testCase.expectedRegion, region)
+ }
+ }
+}
+
// Tests valid hosts for location.
func TestValidBucketLocation(t *testing.T) {
s3Hosts := []struct {
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
index 4f5c1b390..0f623d3d4 100644
--- a/vendor/github.com/minio/minio-go/appveyor.yml
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -21,6 +21,7 @@ install:
- go get -u github.com/minio/go-homedir
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
+ - go get -u github.com/dustin/go-humanize
# to run your custom scripts instead of automatic MSBuild
build_script:
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 748fd01ee..3ad06da3a 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -91,20 +91,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
return c.region, nil
}
- if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
- // For china specifically we need to set everything to
- // cn-north-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // China region.
- return "cn-north-1", nil
- } else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
- // For us-gov specifically we need to set everything to
- // us-gov-west-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // Gov cloud region.
- return "us-gov-west-1", nil
- }
-
if location, ok := c.bucketLocCache.Get(bucketName); ok {
return location, nil
}
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
index 902a975d9..bfdd42db6 100644
--- a/vendor/github.com/minio/minio-go/docs/API.md
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -54,11 +54,12 @@ func main() {
| :--- | :--- | :--- | :--- | :--- | :--- |
| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
-| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
-| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
-| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`NewSSEInfo`](#NewSSEInfo) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
-| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
+| [`BucketExists`](#BucketExists) | [`PutObjectStreaming`](#PutObjectStreaming) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) | [`CopyObject`](#CopyObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+| [`ListObjects`](#ListObjects) | [`StatObject`](#StatObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObject`](#RemoveObject) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
+| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveObjects`](#RemoveObjects) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
+| | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | | |
| | [`FPutObject`](#FPutObject) | | | | |
| | [`FGetObject`](#FGetObject) | | | | |
| | [`ComposeObject`](#ComposeObject) | | | | |
@@ -1067,8 +1068,6 @@ Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may p
NOTE: you can upload to S3 only with specified object name.
-
-
__Parameters__
@@ -1093,6 +1092,37 @@ if err != nil {
fmt.Println(presignedURL)
```
+<a name="PresignedHeadObject"></a>
+### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+
+Generates a presigned URL for HTTP HEAD operations. Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
+|`reqParams` | _url.Values_ |Additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
+
+
+__Example__
+
+
+```go
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedHeadObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
<a name="PresignedPostPolicy"></a>
### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
@@ -1100,7 +1130,6 @@ Allows setting policy conditions to a presigned URL for POST operations. Policie
Create policy :
-
```go
policy := minio.NewPostPolicy()
```
@@ -1128,10 +1157,8 @@ if err != nil {
}
```
-
POST your content from the command line using `curl`:
-
```go
fmt.Printf("curl ")
for k, v := range formData {
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go
new file mode 100644
index 000000000..0332049e5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go
@@ -0,0 +1,53 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Set request parameters
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+	// Generate a presigned HEAD object URL.
+ presignedURL, err := s3Client.PresignedHeadObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(presignedURL)
+}
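+
+// Note: the presigned URL can also be exercised without any SDK; for example,
+// assuming curl is available, `curl -I "<presigned URL>"` issues the HEAD
+// request and prints the object metadata headers.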
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
index 3d16da4fe..ec554e4fe 100644
--- a/vendor/github.com/minio/minio-go/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/functional_tests.go
@@ -21,6 +21,7 @@ package main
import (
"bytes"
"encoding/hex"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -29,23 +30,24 @@ import (
"net/http"
"net/url"
"os"
- "path"
"reflect"
- "runtime"
"strconv"
"strings"
"time"
minio "github.com/minio/minio-go"
log "github.com/sirupsen/logrus"
- logrus "github.com/sirupsen/logrus"
+ "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/policy"
)
-// MinPartSize ... Minimum part size
-const MinPartSize = 1024 * 1024 * 64
+const (
+ sixtyFiveMiB = 65 * humanize.MiByte // 65MiB
+ thirtyThreeKiB = 33 * humanize.KiByte // 33KiB
+)
+
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
@@ -59,6 +61,64 @@ const (
enableHTTPS = "ENABLE_HTTPS"
)
+type mintJSONFormatter struct {
+}
+
+func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+ data := make(log.Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
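+
+// Note: for the formatter above to take effect it presumably has to be
+// installed on the logger during initialization, e.g. via
+// log.SetFormatter(&mintJSONFormatter{}); that call is outside this hunk.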
+
+// log successful test runs
+func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ // log with the fields as per mint
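+ // duration is converted from nanoseconds to milliseconds before being logged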
+ fields := log.Fields{"name": "minio-go", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "pass"}
+ return log.WithFields(fields)
+}
+
+// log failed test runs
+func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ var fields log.Fields
+ // log with the fields as per mint
+ if err != nil {
+ fields = log.Fields{"name": "minio-go", "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "fail", "alert": alert, "message": message, "error": err}
+ } else {
+ fields = log.Fields{"name": "minio-go", "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "fail", "alert": alert, "message": message}
+ }
+ return log.WithFields(fields)
+}
+
+// log not applicable test runs
+func ignoredLog(function string, args map[string]interface{}, startTime time.Time, message string) *log.Entry {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ // log with the fields as per mint
+ fields := log.Fields{"name": "minio-go", "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "na", "message": message}
+ return log.WithFields(fields)
+}
+
func init() {
// If server endpoint is not set, all tests default to
// using https://play.minio.io:9000
@@ -85,14 +145,35 @@ func getFilePath(filename string) (filepath string) {
return
}
+type sizedReader struct {
+ io.Reader
+ size int
+}
+
+func (l *sizedReader) Size() int {
+ return l.size
+}
+
+func (l *sizedReader) Close() error {
+ return nil
+}
+
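+// randomReader endlessly repeats its seed bytes; wrapped in an io.LimitReader
+// by getDataReader below, it yields a deterministic stream of the requested size.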
+type randomReader struct{ seed []byte }
+
+func (r *randomReader) Read(b []byte) (int, error) {
+ return copy(b, bytes.Repeat(r.seed, len(b))), nil
+}
+
// read data from file if it exists or optionally create a buffer of particular size
-func getDataBuffer(fileName string, size int) (buf []byte) {
+func getDataReader(fileName string, size int) io.ReadCloser {
if _, err := os.Stat(getFilePath(fileName)); os.IsNotExist(err) {
- buf = bytes.Repeat([]byte(string('a')), size)
- return
+ return &sizedReader{
+ Reader: io.LimitReader(&randomReader{seed: []byte("a")}, int64(size)),
+ size: size,
+ }
}
- buf, _ = ioutil.ReadFile(getFilePath(fileName))
- return
+ reader, _ := os.Open(getFilePath(fileName))
+ return reader
}
// randString generates random names and prepends them with a known prefix.
@@ -119,10 +200,20 @@ func isQuickMode() bool {
// Tests bucket re-create errors.
func testMakeBucketError() {
- logger().Info()
+ region := "eu-central-1"
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ // initialize logging params
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": region,
+ }
+
+ // skipping region functional tests for non s3 runs
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- logger().Info("skipping region functional tests for non s3 runs")
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -137,7 +228,7 @@ func testMakeBucketError() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -148,30 +239,42 @@ func testMakeBucketError() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ if err = c.MakeBucket(bucketName, region); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket Failed", err).Fatal()
}
- if err = c.MakeBucket(bucketName, "eu-central-1"); err == nil {
- logger().Fatal("Error: make bucket should should fail for", bucketName)
+ if err = c.MakeBucket(bucketName, region); err == nil {
+ failureLog(function, args, startTime, "", "MakeBucket should fail for an existing bucket", err).Fatal()
}
// Verify valid error response from server.
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- logger().Fatal("Error: Invalid error returned by server", err)
+ failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
}
if err = c.RemoveBucket(bucketName); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
}
+
+ successLogger(function, args, startTime).Info()
}
// Tests various bucket supported formats.
func testMakeBucketRegions() {
- logger().Info()
+ region := "eu-central-1"
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ // initialize logging params
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": region,
+ }
+ // skipping region functional tests for non s3 runs
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- logger().Info("skipping region functional tests for non s3 runs")
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -186,7 +289,7 @@ func testMakeBucketRegions() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -197,32 +300,44 @@ func testMakeBucketRegions() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ if err = c.MakeBucket(bucketName, region); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
if err = c.RemoveBucket(bucketName); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
}
// Make a new bucket with '.' in its name, in 'us-west-2'. This
// request is internally staged into a path style instead of
// virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- logger().Fatal("Error:", err, bucketName+".withperiod")
+ region = "us-west-2"
+ args["region"] = region
+ if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Remove the newly created bucket.
if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- logger().Fatal("Error:", err, bucketName+".withperiod")
+ failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
}
+
+ successLogger(function, args, startTime).Info()
}
// Test PutObject using a large data to trigger multipart readat
func testPutObjectReadAt() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObject(bucketName, objectName, reader, objectContentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "objectContentType": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -235,7 +350,7 @@ func testPutObjectReadAt() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -246,70 +361,86 @@ func testPutObjectReadAt() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
}
// Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
// Use different data for each part for multipart tests to ensure part order at the end.
- var buf = getDataBuffer("datafile-65-MB", MinPartSize)
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
// Object content type
objectContentType := "binary/octet-stream"
+ args["objectContentType"] = objectContentType
+
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), objectContentType)
+ n, err := c.PutObject(bucketName, objectName, reader, objectContentType)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Get Object failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat Object failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+ if st.Size != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
}
if st.ContentType != objectContentType {
- logger().Fatalf("Error: Content types don't match, expected: %+v, found: %+v\n", objectContentType, st.ContentType)
+ failureLog(function, args, startTime, "", "Content types don't match", err).Fatal()
}
if err := r.Close(); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
}
if err := r.Close(); err == nil {
- logger().Fatal("Error: object is already closed, should return error")
+ failureLog(function, args, startTime, "", "Object is already closed, didn't return error on Close", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+
+ successLogger(function, args, startTime).Info()
}
// Test PutObject using a large data to trigger multipart readat
func testPutObjectWithMetadata() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectWithMetadata(bucketName, objectName, reader, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "metadata": "",
+ }
+
if isQuickMode() {
- logger().Info("skipping functional tests for short runs")
+ ignoredLog(function, args, startTime, "Skipping functional tests for short runs").Info()
return
}
@@ -324,7 +455,7 @@ func testPutObjectWithMetadata() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -335,72 +466,85 @@ func testPutObjectWithMetadata() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
}
// Generate data using 2 parts
// Use different data in each part for multipart tests to ensure part order at the end.
- var buf = getDataBuffer("datafile-65-MB", MinPartSize)
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
// Object custom metadata
customContentType := "custom/contenttype"
- n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{
+ n, err := c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
"Content-Type": {customContentType},
}, nil)
+ args["metadata"] = map[string][]string{
+ "Content-Type": {customContentType},
+ }
+
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+ if st.Size != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
}
if st.ContentType != customContentType {
- logger().Fatalf("Error: Expected and found content types do not match, want %v, got %v\n",
- customContentType, st.ContentType)
+ failureLog(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err).Fatal()
}
if err := r.Close(); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
}
if err := r.Close(); err == nil {
- logger().Fatal("Error: object is already closed, should return error")
+ failureLog(function, args, startTime, "", "Object already closed, should respond with error", err).Fatal()
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- logger().Fatal("Error: ", err)
+ if err = c.RemoveObject(bucketName, objectName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- logger().Fatal("Error:", err)
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+
+ successLogger(function, args, startTime).Info()
}
// Test put object with streaming signature.
func testPutObjectStreaming() {
- logger().Info()
+ // initialize logging params
+ objectName := "test-object"
+ startTime := time.Now()
+ function := "PutObjectStreaming(bucketName, objectName, reader)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": objectName,
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -413,7 +557,7 @@ func testPutObjectStreaming() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -425,44 +569,52 @@ func testPutObjectStreaming() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
"minio-go-test")
-
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Upload an object.
sizes := []int64{0, 64*1024 - 1, 64 * 1024}
- objectName := "test-object"
- for i, size := range sizes {
+
+ for _, size := range sizes {
data := bytes.Repeat([]byte("a"), int(size))
n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data))
if err != nil {
- logger().Fatalf("Test %d Error: %v %s %s", i+1, err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
}
if n != size {
- log.Error(fmt.Errorf("Test %d Expected upload object size %d but got %d", i+1, size, n))
+ failureLog(function, args, startTime, "", "Expected upload object size doesn't match PutObjectStreaming return value", err).Fatal()
}
}
// Remove the object.
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
// Remove the bucket.
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test listing partially uploaded objects.
func testListPartiallyUploaded() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "isRecursive": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -475,7 +627,7 @@ func testListPartiallyUploaded() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Set user agent.
@@ -486,22 +638,23 @@ func testListPartiallyUploaded() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- r := bytes.NewReader(bytes.Repeat([]byte("0"), MinPartSize*2))
+ r := bytes.NewReader(bytes.Repeat([]byte("0"), sixtyFiveMiB*2))
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
- _, cerr := io.CopyN(writer, r, (MinPartSize*2)/25)
+ _, cerr := io.CopyN(writer, r, (sixtyFiveMiB*2)/25)
if cerr != nil {
- logger().Fatal("Error:", cerr, bucketName)
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
}
i++
r.Seek(0, 0)
@@ -510,33 +663,44 @@ func testListPartiallyUploaded() {
}()
objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
- logger().Fatal("Error: PutObject should fail.")
+ failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
}
if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "String not found in PutObject output", err).Fatal()
}
doneCh := make(chan struct{})
defer close(doneCh)
isRecursive := true
+ args["isRecursive"] = isRecursive
+
multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
for multiPartObject := range multiPartObjectCh {
if multiPartObject.Err != nil {
- logger().Fatalf("Error: Error when listing incomplete upload")
+ failureLog(function, args, startTime, "", "Multipart object error", multiPartObject.Err).Fatal()
}
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test get object seeker from the end, using whence set to '2'.
func testGetObjectSeekEnd() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -549,7 +713,7 @@ func testGetObjectSeekEnd() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -560,75 +724,93 @@ func testGetObjectSeekEnd() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
}
pos, err := r.Seek(-100, 2)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
}
if pos != st.Size-100 {
- logger().Fatalf("Expected %d, got %d instead", pos, st.Size-100)
+ failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
}
buf2 := make([]byte, 100)
m, err := io.ReadFull(r, buf2)
if err != nil {
- logger().Fatal("Error: reading through io.ReadFull", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Error reading through io.ReadFull", err).Fatal()
}
if m != len(buf2) {
- logger().Fatalf("Expected %d bytes, got %d", len(buf2), m)
+ failureLog(function, args, startTime, "", "Number of bytes doesn't match, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
}
hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
if hexBuf1 != hexBuf2 {
- logger().Fatalf("Expected %s, got %s instead", hexBuf1, hexBuf2)
+ failureLog(function, args, startTime, "", "Values at same index don't match", err).Fatal()
}
pos, err = r.Seek(-100, 2)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
}
if pos != st.Size-100 {
- logger().Fatalf("Expected %d, got %d instead", pos, st.Size-100)
+ failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
}
if err = r.Close(); err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "ObjectClose failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test get object reader to not throw error on being closed twice.
func testGetObjectClosedTwice() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -641,7 +823,7 @@ func testGetObjectClosedTwice() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -652,60 +834,71 @@ func testGetObjectClosedTwice() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ args["objectName"] = objectName
+
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
}
if err := r.Close(); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
}
if err := r.Close(); err == nil {
- logger().Fatal("Error: object is already closed, should return error")
+ failureLog(function, args, startTime, "", "Already closed object. No error returned", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test removing multiple objects with Remove API
func testRemoveMultipleObjects() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -718,7 +911,7 @@ func testRemoveMultipleObjects() {
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Set user agent.
@@ -729,11 +922,12 @@ func testRemoveMultipleObjects() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
@@ -750,7 +944,7 @@ func testRemoveMultipleObjects() {
objectName := "sample" + strconv.Itoa(i) + ".txt"
_, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
if err != nil {
- log.Error("Error: PutObject shouldn't fail.", err)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
continue
}
objectsCh <- objectName
@@ -764,20 +958,27 @@ func testRemoveMultipleObjects() {
select {
case r, more := <-errorCh:
if more {
- logger().Fatalf("Unexpected error, objName(%v) err(%v)", r.ObjectName, r.Err)
+ failureLog(function, args, startTime, "", "Unexpected error", r.Err).Fatal()
}
}
// Clean the bucket created by the test
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests removing partially uploaded objects.
func testRemovePartiallyUploaded() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "RemoveIncompleteUpload(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -790,7 +991,7 @@ func testRemovePartiallyUploaded() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Set user agent.
@@ -801,11 +1002,12 @@ func testRemovePartiallyUploaded() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
@@ -816,7 +1018,7 @@ func testRemovePartiallyUploaded() {
for i < 25 {
_, cerr := io.CopyN(writer, r, 128*1024)
if cerr != nil {
- logger().Fatal("Error:", cerr, bucketName)
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
}
i++
r.Seek(0, 0)
@@ -825,26 +1027,37 @@ func testRemovePartiallyUploaded() {
}()
objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
- logger().Fatal("Error: PutObject should fail.")
+ failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
}
if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "String not found", err).Fatal()
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests FPutObject of a big file to trigger multipart
func testFPutObjectMultipart() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "objectContentType": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -857,7 +1070,7 @@ func testFPutObjectMultipart() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -868,11 +1081,12 @@ func testFPutObjectMultipart() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
@@ -881,65 +1095,74 @@ func testFPutObjectMultipart() {
// Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
}
-
// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- var buffer = bytes.Repeat([]byte(string('a')), MinPartSize)
- if _, err := file.Write(buffer); err != nil {
- logger().Fatal("Error:", err)
+ _, err = io.Copy(file, getDataReader("non-existent", sixtyFiveMiB))
+ if err != nil {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
}
- // Close the file pro-actively for windows.
err = file.Close()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File Close failed", err).Fatal()
}
fileName = file.Name()
+ args["fileName"] = fileName
}
- totalSize := MinPartSize * 1
+ totalSize := sixtyFiveMiB * 1
// Set base object name
- objectName := bucketName + "FPutObject"
+ objectName := bucketName + "FPutObject" + "-standard"
+ args["objectName"] = objectName
+
objectContentType := "testapplication/octet-stream"
+ args["objectContentType"] = objectContentType
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName+"-standard", fileName, objectContentType)
+ n, err := c.FPutObject(bucketName, objectName, fileName, objectContentType)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(totalSize) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", totalSize, n)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
- r, err := c.GetObject(bucketName, objectName+"-standard")
+ r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatalf("Unexpected error: %v\n", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
objInfo, err := r.Stat()
if err != nil {
- logger().Fatalf("Unexpected error: %v\n", err)
+ failureLog(function, args, startTime, "", "Unexpected error", err).Fatal()
}
if objInfo.Size != int64(totalSize) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", totalSize, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err).Fatal()
}
if objInfo.ContentType != objectContentType {
- logger().Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
+ failureLog(function, args, startTime, "", "ContentType doesn't match", err).Fatal()
}
// Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
+ err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
-// Tests FPutObject hidden contentType setting
+// Tests FPutObject with null contentType (default = application/octet-stream)
func testFPutObject() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -952,7 +1175,7 @@ func testFPutObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -967,7 +1190,7 @@ func testFPutObject() {
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
@@ -977,127 +1200,131 @@ func testFPutObject() {
// Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
}
// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- var buffer = bytes.Repeat([]byte(string('a')), MinPartSize)
+ var buffer = bytes.Repeat([]byte(string('a')), sixtyFiveMiB)
if _, err = file.Write(buffer); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File write failed", err).Fatal()
}
// Close the file pro-actively for windows.
err = file.Close()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File close failed", err).Fatal()
}
fName = file.Name()
}
- var totalSize = MinPartSize * 1
+ var totalSize = sixtyFiveMiB * 1
// Set base object name
objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
n, err := c.FPutObject(bucketName, objectName+"-standard", fName, "application/octet-stream")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(totalSize) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", totalSize, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
}
// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, "")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(totalSize) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", totalSize, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
}
srcFile, err := os.Open(fName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File open failed", err).Fatal()
}
defer srcFile.Close()
// Add extension to temp file name
tmpFile, err := os.Create(fName + ".gtar")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File create failed", err).Fatal()
}
defer tmpFile.Close()
_, err = io.Copy(tmpFile, srcFile)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File copy failed", err).Fatal()
}
// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", "")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(totalSize) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", totalSize, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
}
// Check headers
rStandard, err := c.StatObject(bucketName, objectName+"-standard")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-standard")
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if rStandard.ContentType != "application/octet-stream" {
- logger().Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
+ failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
}
rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-Octet")
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if rOctet.ContentType != "application/octet-stream" {
- logger().Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
+ failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err).Fatal()
}
rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-GTar")
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if rGTar.ContentType != "application/x-gtar" {
- logger().Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/x-gtar", rStandard.ContentType)
+ failureLog(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rGTar.ContentType, err).Fatal()
}
// Remove all objects and bucket and temp file
err = c.RemoveObject(bucketName, objectName+"-standard")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-Octet")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-GTar")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = os.Remove(fName + ".gtar")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
}
-
+ successLogger(function, args, startTime).Info()
}
// Tests get object ReaderSeeker interface methods.
func testGetObjectReadSeekFunctional() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1110,7 +1337,7 @@ func testGetObjectReadSeekFunctional() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -1121,53 +1348,60 @@ func testGetObjectReadSeekFunctional() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
- bufSize := len(buf)
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
// Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(bufSize) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
}
defer func() {
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
}()
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat object failed", err).Fatal()
}
- if st.Size != int64(bufSize) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
}
// This following function helps us to compare data from the reader after seek
@@ -1177,13 +1411,13 @@ func testGetObjectReadSeekFunctional() {
return
}
buffer := bytes.NewBuffer([]byte{})
- if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if _, err := io.CopyN(buffer, r, int64(thirtyThreeKiB)); err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "CopyN failed", err).Fatal()
}
}
if !bytes.Equal(buf[start:end], buffer.Bytes()) {
- logger().Fatal("Error: Incorrect read bytes v/s original buffer.")
+ failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
}
}
@@ -1202,23 +1436,23 @@ func testGetObjectReadSeekFunctional() {
// Start from offset 0, fetch data and compare
{0, 0, 0, nil, true, 0, 0},
// Start from offset 2048, fetch data and compare
- {2048, 0, 2048, nil, true, 2048, bufSize},
+ {2048, 0, 2048, nil, true, 2048, thirtyThreeKiB},
// Start from offset larger than possible
- {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+ {int64(thirtyThreeKiB) + 1024, 0, 0, seekErr, false, 0, 0},
// Move to offset 0 without comparing
{0, 0, 0, nil, false, 0, 0},
// Move one step forward and compare
- {1, 1, 1, nil, true, 1, bufSize},
+ {1, 1, 1, nil, true, 1, thirtyThreeKiB},
// Move larger than possible
- {int64(bufSize), 1, 0, seekErr, false, 0, 0},
+ {int64(thirtyThreeKiB), 1, 0, seekErr, false, 0, 0},
// Provide negative offset with CUR_SEEK
{int64(-1), 1, 0, seekErr, false, 0, 0},
// Test with whence SEEK_END and with positive offset
- {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+ {1024, 2, int64(thirtyThreeKiB) - 1024, io.EOF, true, 0, 0},
// Test with whence SEEK_END and with negative offset
- {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ {-1024, 2, int64(thirtyThreeKiB) - 1024, nil, true, thirtyThreeKiB - 1024, thirtyThreeKiB},
// Test with whence SEEK_END and with large negative offset
- {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+ {-int64(thirtyThreeKiB) * 2, 2, 0, seekErr, true, 0, 0},
}
for i, testCase := range testCases {
@@ -1226,11 +1460,11 @@ func testGetObjectReadSeekFunctional() {
n, err := r.Seek(testCase.offset, testCase.whence)
// We expect an error
if testCase.err == seekErr && err == nil {
- logger().Fatalf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err)
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal()
}
// We expect a specific error
if testCase.err != seekErr && testCase.err != err {
- logger().Fatalf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err)
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal()
}
// If we expect an error go to the next loop
if testCase.err != nil {
@@ -1238,19 +1472,25 @@ func testGetObjectReadSeekFunctional() {
}
// Check the returned seek pos
if n != testCase.pos {
- logger().Fatalf("Test %d, error: number of bytes seeked does not match, want %v, got %v\n", i+1,
- testCase.pos, n)
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err).Fatal()
}
// Compare only if shouldCmp is activated
if testCase.shouldCmp {
cmpData(r, testCase.start, testCase.end)
}
}
+ successLogger(function, args, startTime).Info()
}
// Tests get object ReaderAt interface methods.
func testGetObjectReadAtFunctional() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1263,7 +1503,7 @@ func testGetObjectReadAtFunctional() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -1274,31 +1514,40 @@ func testGetObjectReadAtFunctional() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
- // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
}
// read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
offset := int64(2048)
@@ -1311,56 +1560,56 @@ func testGetObjectReadAtFunctional() {
// Test readAt before stat is called.
m, err := r.ReadAt(buf1, offset)
if err != nil {
- logger().Fatal("Error:", err, len(buf1), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf1) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf1))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err).Fatal()
}
if !bytes.Equal(buf1, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
offset += 512
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
}
m, err = r.ReadAt(buf2, offset)
if err != nil {
- logger().Fatal("Error:", err, st.Size, len(buf2), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf2) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err).Fatal()
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
- logger().Fatal("Error:", err, st.Size, len(buf3), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf3) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err).Fatal()
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
- logger().Fatal("Error:", err, st.Size, len(buf4), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf4) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err).Fatal()
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
buf5 := make([]byte, n)
@@ -1368,14 +1617,14 @@ func testGetObjectReadAtFunctional() {
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err, len(buf5))
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
}
if m != len(buf5) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err).Fatal()
}
if !bytes.Equal(buf, buf5) {
- logger().Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
+ failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
}
buf6 := make([]byte, n+1)
@@ -1383,22 +1632,28 @@ func testGetObjectReadAtFunctional() {
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err, len(buf6))
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
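
The converted tests above report through failureLog, successLogger and ignoredLog helpers whose definitions are not part of this hunk. A minimal sketch of what such helpers could look like, assuming a logrus-style structured logger and hypothetical field names (status, function, args, duration, message), is shown below; the real implementations may differ.

package main

import (
	"fmt"
	"time"

	log "github.com/sirupsen/logrus"
)

// successLogger returns a log entry tagged PASS for a finished test.
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
	return log.WithFields(log.Fields{
		"status":   "PASS",
		"function": function,
		"args":     fmt.Sprint(args),
		"duration": time.Since(startTime).Nanoseconds() / 1000000, // elapsed ms
	})
}

// failureLog returns a log entry tagged FAIL, carrying the failure message
// and the underlying error, if any; callers chain .Fatal() on it.
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry {
	fields := log.Fields{
		"status":   "FAIL",
		"function": function,
		"args":     fmt.Sprint(args),
		"duration": time.Since(startTime).Nanoseconds() / 1000000,
		"alert":    alert,
		"message":  message,
	}
	if err != nil {
		fields["error"] = err.Error()
	}
	return log.WithFields(fields)
}

// ignoredLog returns a log entry for tests skipped in the current environment.
func ignoredLog(function string, args map[string]interface{}, startTime time.Time, message string) *log.Entry {
	return log.WithFields(log.Fields{
		"status":   "NA",
		"function": function,
		"args":     fmt.Sprint(args),
		"duration": time.Since(startTime).Nanoseconds() / 1000000,
		"message":  message,
	})
}

// Example usage mirroring the pattern used by the tests in this diff.
func main() {
	startTime := time.Now()
	args := map[string]interface{}{"bucketName": "my-bucket"}
	successLogger("MakeBucket(bucketName, region)", args, startTime).Info()
}
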
// Test Presigned Post Policy
func testPresignedPostPolicy() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "PresignedPostPolicy(policy)"
+ args := map[string]interface{}{
+ "policy": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1411,7 +1666,7 @@ func testPresignedPostPolicy() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -1426,43 +1681,49 @@ func testPresignedPostPolicy() {
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
- // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
}
policy := minio.NewPostPolicy()
if err := policy.SetBucket(""); err == nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err).Fatal()
}
if err := policy.SetKey(""); err == nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "SetKey did not fail for invalid conditions", err).Fatal()
}
if err := policy.SetKeyStartsWith(""); err == nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err).Fatal()
}
if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err).Fatal()
}
if err := policy.SetContentType(""); err == nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err).Fatal()
}
if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
- logger().Fatalf("Error: %s", err)
+ failureLog(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err).Fatal()
}
policy.SetBucket(bucketName)
@@ -1470,10 +1731,11 @@ func testPresignedPostPolicy() {
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
policy.SetContentType("image/png")
policy.SetContentLengthRange(1024, 1024*1024)
+ args["policy"] = policy
_, _, err = c.PresignedPostPolicy(policy)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "PresignedPostPolicy failed", err).Fatal()
}
policy = minio.NewPostPolicy()
@@ -1481,18 +1743,25 @@ func testPresignedPostPolicy() {
// Remove all objects and buckets
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
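
testPresignedPostPolicy only verifies that PresignedPostPolicy returns a URL and form data without error. For context, here is a hedged sketch of how a caller might consume that result with a plain HTTP multipart POST; the "file" field name and the accepted status codes follow the usual S3 POST-policy convention and are assumptions, not something this change exercises.

package example

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
)

// uploadWithPostPolicy posts a local file to the presigned URL using the
// form fields returned by PresignedPostPolicy.
func uploadWithPostPolicy(postURL *url.URL, formData map[string]string, filePath string) error {
	var body bytes.Buffer
	writer := multipart.NewWriter(&body)

	// The policy conditions (bucket, key, signature, ...) go in as ordinary fields.
	for k, v := range formData {
		if err := writer.WriteField(k, v); err != nil {
			return err
		}
	}

	// The object payload is sent as the final "file" part.
	f, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer f.Close()
	part, err := writer.CreateFormFile("file", filepath.Base(filePath))
	if err != nil {
		return err
	}
	if _, err = io.Copy(part, f); err != nil {
		return err
	}
	if err = writer.Close(); err != nil {
		return err
	}

	resp, err := http.Post(postURL.String(), writer.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}
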
// Tests copy object
func testCopyObject() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "CopyObject(dst, src)"
+ args := map[string]interface{}{
+ "dst": "",
+ "src": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1504,7 +1773,7 @@ func testCopyObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -1519,38 +1788,37 @@ func testCopyObject() {
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Make a new bucket in 'us-east-1' (destination bucket).
err = c.MakeBucket(bucketName+"-copy", "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName+"-copy")
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("5"), rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
}
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
// Check the various fields of source object against destination object.
objInfo, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
// Copy Source
@@ -1561,120 +1829,130 @@ func testCopyObject() {
// All invalid conditions first.
err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetMatchETagCond("")
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetMatchETagExceptCond("")
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
}
err = src.SetMatchETagCond(objInfo.ETag)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
}
+ args["src"] = src
dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+ args["dst"] = dst
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
// Perform the Copy
err = c.CopyObject(dst, src)
if err != nil {
- logger().Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
}
// Source object
- reader, err := c.GetObject(bucketName, objectName)
+ r, err = c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
+
// Destination object
readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
// Check the various fields of source object against destination object.
- objInfo, err = reader.Stat()
+ objInfo, err = r.Stat()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
objInfoCopy, err := readerCopy.Stat()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
if objInfo.Size != objInfoCopy.Size {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n",
- objInfo.Size, objInfoCopy.Size)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err).Fatal()
}
// CopyObject again but with wrong conditions
src = minio.NewSourceInfo(bucketName, objectName, nil)
err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
}
err = src.SetMatchETagExceptCond(objInfo.ETag)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
}
// Perform the Copy which should fail
err = c.CopyObject(dst, src)
if err == nil {
- logger().Fatal("Error:", err, bucketName+"-copy", objectName+"-copy should fail")
+ failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
}
// Remove all objects and buckets
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = c.RemoveBucket(bucketName + "-copy")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
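
These tests switch from getDataBuffer to a getDataReader helper and a thirtyThreeKiB constant, neither of which is defined in this hunk. A rough sketch follows, under the assumption that the helper serves a pre-generated fixture file when a data directory (here called MINT_DATA_DIR, an assumed name) is configured and otherwise falls back to an in-memory buffer of the requested size.

package example

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// thirtyThreeKiB matches the "datafile-33-kB" fixtures used above (assumed value).
const thirtyThreeKiB = 33 * 1024

// getDataReader returns a ReadCloser over either a named fixture file or a
// deterministic in-memory buffer of the requested size.
func getDataReader(fileName string, size int) io.ReadCloser {
	if dir := os.Getenv("MINT_DATA_DIR"); dir != "" {
		if f, err := os.Open(filepath.Join(dir, fileName)); err == nil {
			return f
		}
	}
	return ioutil.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), size)))
}
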
// TestEncryptionPutGet tests client side encryption
func testEncryptionPutGet() {
- logger().Info()
-
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "cbcMaterials": "",
+ "metadata": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
- // Instantiate new minio client object.
- c, err := minio.New(
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -1685,11 +1963,12 @@ func testEncryptionPutGet() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Generate a symmetric key
@@ -1721,7 +2000,7 @@ func testEncryptionPutGet() {
"9945cb5c7d")
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal()
}
publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
@@ -1731,17 +2010,18 @@ func testEncryptionPutGet() {
"c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
"80a89e43f29b570203010001")
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal()
}
// Generate an asymmetric key
asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err).Fatal()
}
// Object custom metadata
customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
testCases := []struct {
buf []byte
@@ -1770,43 +2050,45 @@ func testEncryptionPutGet() {
for i, testCase := range testCases {
// Generate a random object name
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
// Secured object
cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+ args["cbcMaterials"] = cbcMaterials
+
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal()
}
// Put encrypted data
_, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil)
if err != nil {
- logger().Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutEncryptedObject failed", err).Fatal()
}
// Read the data back
r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
if err != nil {
- logger().Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal()
}
defer r.Close()
// Compare the sent object with the received one
recvBuffer := bytes.NewBuffer([]byte{})
if _, err = io.Copy(recvBuffer, r); err != nil {
- logger().Fatalf("Test %d, error: %v", i+1, err)
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err).Fatal()
}
if recvBuffer.Len() != len(testCase.buf) {
- logger().Fatalf("Test %d, error: number of bytes of received object does not match, want %v, got %v\n",
- i+1, len(testCase.buf), recvBuffer.Len())
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err).Fatal()
}
if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- logger().Fatalf("Test %d, error: Encrypted sent is not equal to decrypted, want `%x`, go `%x`", i+1, testCase.buf, recvBuffer.Bytes())
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err).Fatal()
}
// Remove test object
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatalf("Test %d, error: %v", i+1, err)
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", RemoveObject failed with: "+err.Error(), err).Fatal()
}
}
@@ -1814,20 +2096,26 @@ func testEncryptionPutGet() {
// Remove test bucket
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
-
+ successLogger(function, args, startTime).Info()
}
func testBucketNotification() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "SetBucketNotification(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
if os.Getenv("NOTIFY_BUCKET") == "" ||
os.Getenv("NOTIFY_SERVICE") == "" ||
os.Getenv("NOTIFY_REGION") == "" ||
os.Getenv("NOTIFY_ACCOUNTID") == "" ||
os.Getenv("NOTIFY_RESOURCE") == "" {
- logger().Info("skipping notification test if not configured")
+ ignoredLog(function, args, startTime, "Skipped notification test as it is not configured").Info()
return
}
@@ -1841,7 +2129,7 @@ func testBucketNotification() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable to debug
@@ -1851,11 +2139,13 @@ func testBucketNotification() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
bucketName := os.Getenv("NOTIFY_BUCKET")
+ args["bucketName"] = bucketName
topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
topicConfig := minio.NewNotificationConfig(topicArn)
+
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
topicConfig.AddFilterSuffix("jpg")
@@ -1870,7 +2160,7 @@ func testBucketNotification() {
// because it is duplicated
bNotification.AddTopic(topicConfig)
if len(bNotification.TopicConfigs) != 1 {
- logger().Fatal("Error: duplicated entry added")
+ failureLog(function, args, startTime, "", "Duplicate entry added", err).Fatal()
}
// Add and remove a queue config
@@ -1879,31 +2169,34 @@ func testBucketNotification() {
err = c.SetBucketNotification(bucketName, bNotification)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "SetBucketNotification failed", err).Fatal()
}
bNotification, err = c.GetBucketNotification(bucketName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "GetBucketNotification failed", err).Fatal()
}
if len(bNotification.TopicConfigs) != 1 {
- logger().Fatal("Error: Topic config is empty")
+ failureLog(function, args, startTime, "", "Topic config is empty", err).Fatal()
}
if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
- logger().Fatal("Error: cannot get the suffix")
+ failureLog(function, args, startTime, "", "Couldn't get the suffix", err).Fatal()
}
err = c.RemoveAllBucketNotification(bucketName)
if err != nil {
- logger().Fatal("Error: cannot delete bucket notification")
+ failureLog(function, args, startTime, "", "RemoveAllBucketNotification failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests comprehensive list of all methods.
func testFunctional() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testFunctional()"
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1915,7 +2208,7 @@ func testFunctional() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, nil, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable to debug
@@ -1929,21 +2222,26 @@ func testFunctional() {
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
+ function = "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
file, err := os.Create(fileName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File creation failed", err).Fatal()
}
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
_, err = file.Write(buf)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File write failed", err).Fatal()
}
}
file.Close()
@@ -1951,68 +2249,121 @@ func testFunctional() {
// Verify if bucket exits and you have access.
var exists bool
exists, err = c.BucketExists(bucketName)
+ function = "BucketExists(bucketName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
}
if !exists {
- logger().Fatal("Error: could not find ", bucketName)
+ failureLog(function, args, startTime, "", "Could not find the bucket", err).Fatal()
}
// Asserting the default bucket policy.
policyAccess, err := c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
}
if policyAccess != "none" {
- logger().Fatalf("Default bucket policy incorrect")
+ failureLog(function, args, startTime, "", "policy should be set to none", err).Fatal()
}
// Set the bucket policy to 'public readonly'.
err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadOnly,
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
}
// should return policy `readonly`.
policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
}
if policyAccess != "readonly" {
- logger().Fatalf("Expected bucket policy to be readonly")
+ failureLog(function, args, startTime, "", "policy should be set to readonly", err).Fatal()
}
// Make the bucket 'public writeonly'.
err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyWriteOnly,
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
}
// should return policy `writeonly`.
policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
}
if policyAccess != "writeonly" {
- logger().Fatalf("Expected bucket policy to be writeonly")
+ failureLog(function, args, startTime, "", "policy should be set to writeonly", err).Fatal()
}
// Make the bucket 'public read/write'.
err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadWrite,
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
}
// should return policy `readwrite`.
policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
}
if policyAccess != "readwrite" {
- logger().Fatalf("Expected bucket policy to be readwrite")
+ failureLog(function, args, startTime, "", "policy should be set to readwrite", err).Fatal()
}
// List all buckets.
buckets, err := c.ListBuckets()
+ function = "ListBuckets()"
+ args = nil
+
if len(buckets) == 0 {
- logger().Fatal("Error: list buckets cannot be empty", buckets)
+ failureLog(function, args, startTime, "", "Found bucket list to be empty", err).Fatal()
}
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
}
// Verify if previously created bucket is listed in list buckets.
@@ -2025,7 +2376,7 @@ func testFunctional() {
// If bucket not found error out.
if !bucketFound {
- logger().Fatal("Error: bucket ", bucketName, "not found")
+ failureLog(function, args, startTime, "", "Bucket: "+bucketName+" not found", err).Fatal()
}
objectName := bucketName + "unique"
@@ -2034,20 +2385,34 @@ func testFunctional() {
buf := bytes.Repeat([]byte("f"), 1<<19)
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+ function = "PutObject(bucketName, objectName, reader, contentType)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
+
if n != int64(len(buf)) {
- logger().Fatal("Error: bad length ", n, len(buf))
+ failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal()
}
n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-nolength",
+ "contentType": "binary/octet-stream",
+ }
+
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-nolength")
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal()
}
// Instantiate a done channel to close all listing.
@@ -2056,6 +2421,14 @@ func testFunctional() {
objFound := false
isRecursive := true // Recursive is true.
+
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
@@ -2063,11 +2436,18 @@ func testFunctional() {
}
}
if !objFound {
- logger().Fatal("Error: object " + objectName + " not found.")
+ failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
}
objFound = false
isRecursive = true // Recursive is true.
+ function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
@@ -2075,10 +2455,18 @@ func testFunctional() {
}
}
if !objFound {
- logger().Fatal("Error: object " + objectName + " not found.")
+ failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
}
incompObjNotFound := true
+
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" {
incompObjNotFound = false
@@ -2086,86 +2474,147 @@ func testFunctional() {
}
}
if !incompObjNotFound {
- logger().Fatal("Error: unexpected dangling incomplete upload found.")
+ failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
}
newReader, err := c.GetObject(bucketName, objectName)
+ function = "GetObject(bucketName, objectName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
}
if !bytes.Equal(newReadBytes, buf) {
- logger().Fatal("Error: bytes mismatch.")
+ failureLog(function, args, startTime, "", "GetObject bytes mismatch", err).Fatal()
}
err = c.FGetObject(bucketName, objectName, fileName+"-f")
+ function = "FGetObject(bucketName, objectName, fileName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "FGetObject failed", err).Fatal()
+ }
+
+ // Generate presigned HEAD object url.
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err := http.Head(presignedHeadURL.String())
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
}
+ if resp.StatusCode != http.StatusOK {
+ failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+ }
+ if resp.Header.Get("ETag") == "" {
+ failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+ }
+ resp.Body.Close()
// Generate presigned GET object url.
presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
}
// Verify if presigned url works.
- resp, err := http.Get(presignedGetURL.String())
+ resp, err = http.Get(presignedGetURL.String())
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
}
if resp.StatusCode != http.StatusOK {
- logger().Fatal("Error: ", resp.Status)
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
}
+ resp.Body.Close()
if !bytes.Equal(newPresignedBytes, buf) {
- logger().Fatal("Error: bytes mismatch.")
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
}
// Set request parameters.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ "reqParams": reqParams,
+ }
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
}
// Verify if presigned url works.
resp, err = http.Get(presignedGetURL.String())
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
}
if resp.StatusCode != http.StatusOK {
- logger().Fatal("Error: ", resp.Status)
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
}
newPresignedBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
}
if !bytes.Equal(newPresignedBytes, buf) {
- logger().Fatal("Error: bytes mismatch for presigned GET URL.")
+ failureLog(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err).Fatal()
}
if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- logger().Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+ failureLog(function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err).Fatal()
}
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
}
buf = bytes.Repeat([]byte("g"), 1<<19)
req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err).Fatal()
}
httpClient := &http.Client{
// Setting a sensible time out of 30secs to wait for response
@@ -2176,62 +2625,90 @@ func testFunctional() {
}
resp, err = httpClient.Do(req)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
}
newReader, err = c.GetObject(bucketName, objectName+"-presigned")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "GetObject after PresignedPutObject failed", err).Fatal()
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "ReadAll after GetObject failed", err).Fatal()
}
if !bytes.Equal(newReadBytes, buf) {
- logger().Fatal("Error: bytes mismatch.")
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName)
+ function = "RemoveObject(bucketName, objectName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-f")
+ args["objectName"] = objectName + "-f"
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
+
err = c.RemoveObject(bucketName, objectName+"-nolength")
+ args["objectName"] = objectName + "-nolength"
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
+
err = c.RemoveObject(bucketName, objectName+"-presigned")
+ args["objectName"] = objectName + "-presigned"
+
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
+
err = c.RemoveBucket(bucketName)
+ function = "RemoveBucket(bucketName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err == nil {
- logger().Fatal("Error:")
+ failureLog(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err).Fatal()
}
if err.Error() != "The specified bucket does not exist" {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
if err = os.Remove(fileName); err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
}
if err = os.Remove(fileName + "-f"); err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
}
+ function = "testFunctional()"
+ successLogger(function, args, startTime).Info()
}
// Test for validating GetObject Reader* methods functioning when the
// object is modified in the object store.
func testGetObjectObjectModified() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Instantiate new minio client object.
c, err := minio.NewV4(
@@ -2241,7 +2718,7 @@ func testGetObjectObjectModified() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2252,9 +2729,11 @@ func testGetObjectObjectModified() {
// Make a new bucket.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
defer c.RemoveBucket(bucketName)
@@ -2263,14 +2742,14 @@ func testGetObjectObjectModified() {
content := "helloworld"
_, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text")
if err != nil {
- logger().Fatalf("Failed to upload %s/%s: %v", bucketName, objectName, err)
+ failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
}
defer c.RemoveObject(bucketName, objectName)
reader, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatalf("Failed to get object %s/%s: %v", bucketName, objectName, err)
+ failureLog(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err).Fatal()
}
defer reader.Close()
@@ -2278,32 +2757,42 @@ func testGetObjectObjectModified() {
b := make([]byte, 5)
n, err := reader.ReadAt(b, 0)
if err != nil {
- logger().Fatalf("Failed to read object %s/%s at an offset: %v", bucketName, objectName, err)
+ failureLog(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err).Fatal()
}
// Upload different contents to the same object while object is being read.
newContent := "goodbyeworld"
_, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text")
if err != nil {
- logger().Fatalf("Failed to upload %s/%s: %v", bucketName, objectName, err)
+ failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
}
// Confirm that a Stat() call in between doesn't change the Object's cached etag.
_, err = reader.Stat()
- if err.Error() != "At least one of the pre-conditions you specified did not hold" {
- log.Error(fmt.Errorf("Expected Stat to fail with error %s but received %s", "At least one of the pre-conditions you specified did not hold", err.Error()))
+ expectedError := "At least one of the pre-conditions you specified did not hold"
+ if err.Error() != expectedError {
+ failureLog(function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err).Fatal()
}
// Read again only to find object contents have been modified since last read.
_, err = reader.ReadAt(b, int64(n))
- if err.Error() != "At least one of the pre-conditions you specified did not hold" {
- log.Error(fmt.Errorf("Expected ReadAt to fail with error %s but received %s", "At least one of the pre-conditions you specified did not hold", err.Error()))
+ if err.Error() != expectedError {
+ failureLog(function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test validates putObject to upload a file seeked at a given offset.
func testPutObjectUploadSeekedObject() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileToUpload": "",
+ "contentType": "binary/octet-stream",
+ }
// Instantiate new minio client object.
c, err := minio.NewV4(
@@ -2313,7 +2802,7 @@ func testPutObjectUploadSeekedObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2324,15 +2813,19 @@ func testPutObjectUploadSeekedObject() {
// Make a new bucket.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
defer c.RemoveBucket(bucketName)
tempfile, err := ioutil.TempFile("", "minio-go-upload-test-")
+ args["fileToUpload"] = tempfile
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "TempFile create failed", err).Fatal()
}
var data []byte
@@ -2344,65 +2837,78 @@ func testPutObjectUploadSeekedObject() {
}
var length = len(data)
if _, err = tempfile.Write(data); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "TempFile write failed", err).Fatal()
}
objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
offset := length / 2
if _, err := tempfile.Seek(int64(offset), 0); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "TempFile seek failed", err).Fatal()
}
n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
if n != int64(length-offset) {
- logger().Fatalf("Invalid length returned, want %v, got %v", int64(length-offset), n)
+ failureLog(function, args, startTime, "", "Invalid length returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal()
}
tempfile.Close()
if err = os.Remove(tempfile.Name()); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
}
length = int(n)
obj, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
n, err = obj.Seek(int64(offset), 0)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
}
if n != int64(offset) {
- logger().Fatalf("Invalid offset returned, want %v, got %v", int64(offset), n)
+ failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(offset))+" got "+string(n), err).Fatal()
}
n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
if n != int64(length-offset) {
- logger().Fatalf("Invalid length returned, want %v, got %v", int64(length-offset), n)
+ failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal()
}
if err = c.RemoveObject(bucketName, objectName); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests bucket re-create errors.
func testMakeBucketErrorV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- logger().Info("skipping region functional tests for non s3 runs")
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -2417,7 +2923,7 @@ func testMakeBucketErrorV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2428,27 +2934,35 @@ func testMakeBucketErrorV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket in 'eu-west-1'.
if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
- logger().Fatal("Error: make bucket should should fail for", bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err).Fatal()
}
// Verify valid error response from server.
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- logger().Fatal("Error: Invalid error returned by server", err)
+ failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
}
if err = c.RemoveBucket(bucketName); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test get object reader to not throw error on being closed twice.
func testGetObjectClosedTwiceV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2461,7 +2975,7 @@ func testGetObjectClosedTwiceV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2472,61 +2986,72 @@ func testGetObjectClosedTwiceV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K.
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ args["objectName"] = objectName
+
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
}
if err := r.Close(); err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
if err := r.Close(); err == nil {
- logger().Fatal("Error: object is already closed, should return error")
+ failureLog(function, args, startTime, "", "Object is already closed, should return error", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests removing partially uploaded objects.
func testRemovePartiallyUploadedV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "RemoveIncompleteUpload(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2539,7 +3064,7 @@ func testRemovePartiallyUploadedV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Set user agent.
@@ -2550,11 +3075,12 @@ func testRemovePartiallyUploadedV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
@@ -2565,7 +3091,7 @@ func testRemovePartiallyUploadedV2() {
for i < 25 {
_, cerr := io.CopyN(writer, r, 128*1024)
if cerr != nil {
- logger().Fatal("Error:", cerr, bucketName)
+ failureLog(function, args, startTime, "", "Copy failed", cerr).Fatal()
}
i++
r.Seek(0, 0)
@@ -2574,26 +3100,37 @@ func testRemovePartiallyUploadedV2() {
}()
objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
- logger().Fatal("Error: PutObject should fail.")
+ failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
}
if err.Error() != "proactively closed to be verified later" {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err).Fatal()
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests FPutObject hidden contentType setting
func testFPutObjectV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "FPutObject(bucketName, objectName, fileName, contentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "contentType": "application/octet-stream",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2606,7 +3143,7 @@ func testFPutObjectV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2617,132 +3154,146 @@ func testFPutObjectV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Make a temp file with 11*1024*1024 bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
}
r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
n, err := io.CopyN(file, r, 11*1024*1024)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
}
if n != int64(11*1024*1024) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
}
// Close the file pro-actively for windows.
err = file.Close()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File close failed", err).Fatal()
}
// Set base object name
objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
+ args["fileName"] = file.Name()
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(11*1024*1024) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
}
// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(11*1024*1024) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
}
// Add extension to temp file name
fileName := file.Name()
err = os.Rename(file.Name(), fileName+".gtar")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Rename failed", err).Fatal()
}
// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+ args["fileName"] = fileName + ".gtar"
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
}
if n != int64(11*1024*1024) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
}
// Check headers
rStandard, err := c.StatObject(bucketName, objectName+"-standard")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-standard")
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if rStandard.ContentType != "application/octet-stream" {
- logger().Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
+ failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err).Fatal()
}
rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-Octet")
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if rOctet.ContentType != "application/octet-stream" {
- logger().Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
+ failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err).Fatal()
}
rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-GTar")
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if rGTar.ContentType != "application/x-gtar" {
- logger().Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/x-gtar", rStandard.ContentType)
+ failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err).Fatal()
}
// Remove all objects and bucket and temp file
err = c.RemoveObject(bucketName, objectName+"-standard")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-Octet")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-GTar")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = os.Remove(fileName + ".gtar")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
}
-
+ successLogger(function, args, startTime).Info()
}
// Tests various bucket supported formats.
func testMakeBucketRegionsV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- logger().Info("skipping region functional tests for non s3 runs")
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -2757,7 +3308,7 @@ func testMakeBucketRegionsV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2768,32 +3319,43 @@ func testMakeBucketRegionsV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket in 'eu-west-1'.
if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
if err = c.RemoveBucket(bucketName); err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
// Make a new bucket with '.' in its name, in 'us-west-2'. This
// request is internally staged into a path style instead of
// virtual host style.
if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- logger().Fatal("Error:", err, bucketName+".withperiod")
+ args["bucketName"] = bucketName + ".withperiod"
+ args["region"] = "us-west-2"
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
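+ // For reference, with a hypothetical bucket named "my.bucket" on
+ // s3.amazonaws.com the two addressing styles would look roughly like:
+ //   virtual-host style: https://my.bucket.s3.amazonaws.com/object
+ //   path style:         https://s3.amazonaws.com/my.bucket/object
+ // The extra dot in the virtual-host form does not match the service's
+ // wildcard TLS certificate, which is why such buckets are requested
+ // path-style.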
// Remove the newly created bucket.
if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- logger().Fatal("Error:", err, bucketName+".withperiod")
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests get object ReaderSeeker interface methods.
func testGetObjectReadSeekFunctionalV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2805,7 +3367,7 @@ func testGetObjectReadSeekFunctionalV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2816,114 +3378,129 @@ func testGetObjectReadSeekFunctionalV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K.
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
- // Save the data.
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data.
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
}
offset := int64(2048)
n, err = r.Seek(offset, 0)
if err != nil {
- logger().Fatal("Error:", err, offset)
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
}
if n != offset {
- logger().Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
- offset, n)
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
}
n, err = r.Seek(0, 1)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
}
if n != offset {
- logger().Fatalf("Error: number of current seek does not match, want %v, got %v\n",
- offset, n)
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
}
_, err = r.Seek(offset, 2)
if err == nil {
- logger().Fatal("Error: seek on positive offset for whence '2' should error out")
+ failureLog(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err).Fatal()
}
n, err = r.Seek(-offset, 2)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
}
if n != st.Size-offset {
- logger().Fatalf("Error: number of bytes seeked back does not match, want %d, got %v\n", st.Size-offset, n)
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err).Fatal()
}
var buffer1 bytes.Buffer
if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
}
}
if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
- logger().Fatal("Error: Incorrect read bytes v/s original buffer.")
+ failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
}
// Seek again and read again.
n, err = r.Seek(offset-1, 0)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
}
if n != (offset - 1) {
- logger().Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err).Fatal()
}
var buffer2 bytes.Buffer
if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
}
}
// Verify now lesser bytes.
if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- logger().Fatal("Error: Incorrect read bytes v/s original buffer.")
+ failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests get object ReaderAt interface methods.
func testGetObjectReadAtFunctionalV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2935,7 +3512,7 @@ func testGetObjectReadAtFunctionalV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -2946,40 +3523,49 @@ func testGetObjectReadAtFunctionalV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
- // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
st, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
- if st.Size != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
}
offset := int64(2048)
@@ -2991,35 +3577,35 @@ func testGetObjectReadAtFunctionalV2() {
m, err := r.ReadAt(buf2, offset)
if err != nil {
- logger().Fatal("Error:", err, st.Size, len(buf2), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf2) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
- logger().Fatal("Error:", err, st.Size, len(buf3), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf3) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err).Fatal()
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
- logger().Fatal("Error:", err, st.Size, len(buf4), offset)
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
if m != len(buf4) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err).Fatal()
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
- logger().Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
}
buf5 := make([]byte, n)
@@ -3027,14 +3613,14 @@ func testGetObjectReadAtFunctionalV2() {
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err, len(buf5))
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
}
if m != len(buf5) {
- logger().Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err).Fatal()
}
if !bytes.Equal(buf, buf5) {
- logger().Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
+ failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
}
buf6 := make([]byte, n+1)
@@ -3042,22 +3628,29 @@ func testGetObjectReadAtFunctionalV2() {
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
- logger().Fatal("Error:", err, len(buf6))
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
}
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Tests copy object
func testCopyObjectV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{
+ "destination": "",
+ "source": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3070,7 +3663,7 @@ func testCopyObjectV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -3085,38 +3678,38 @@ func testCopyObjectV2() {
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Make a new bucket in 'us-east-1' (destination bucket).
err = c.MakeBucket(bucketName+"-copy", "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName+"-copy")
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
- // Generate data more than 32K
- var buf = getDataBuffer("datafile-33-kB", rand.Intn(1<<20)+32*1024)
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
- if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
}
r, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
// Check the various fields of source object against destination object.
objInfo, err := r.Stat()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
// Copy Source
@@ -3127,112 +3720,120 @@ func testCopyObjectV2() {
// All invalid conditions first.
err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetMatchETagCond("")
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetMatchETagExceptCond("")
if err == nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
}
err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
}
err = src.SetMatchETagCond(objInfo.ETag)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
}
+ args["source"] = src
dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
+ args["destination"] = dst
// Perform the Copy
err = c.CopyObject(dst, src)
if err != nil {
- logger().Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
}
// Source object
- reader, err := c.GetObject(bucketName, objectName)
+ r, err = c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
// Destination object
readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
// Check the various fields of source object against destination object.
- objInfo, err = reader.Stat()
+ objInfo, err = r.Stat()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
objInfoCopy, err := readerCopy.Stat()
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
if objInfo.Size != objInfoCopy.Size {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n",
- objInfo.Size, objInfoCopy.Size)
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err).Fatal()
}
// CopyObject again but with wrong conditions
src = minio.NewSourceInfo(bucketName, objectName, nil)
err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
}
err = src.SetMatchETagExceptCond(objInfo.ETag)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
}
// Perform the Copy which should fail
err = c.CopyObject(dst, src)
if err == nil {
- logger().Fatal("Error:", err, bucketName+"-copy", objectName+"-copy should fail")
+ failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
}
// Remove all objects and buckets
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = c.RemoveBucket(bucketName + "-copy")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testComposeObjectErrorCasesWrapper(minioClient)"
+ args := map[string]interface{}{}
+
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
+
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Test that more than 10K source objects cannot be
@@ -3241,13 +3842,13 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) {
srcSlice := srcArr[:]
dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
if err := c.ComposeObject(dst, srcSlice); err == nil {
- logger().Fatal("Error was expected.")
+ failureLog(function, args, startTime, "", "Expected error in ComposeObject", err).Fatal()
} else if err.Error() != "There must be as least one and up to 10000 source objects." {
- logger().Fatal("Got unexpected error: ", err)
+ failureLog(function, args, startTime, "", "Got unexpected error", err).Fatal()
}
// Create a source with invalid offset spec and check that
@@ -3257,26 +3858,30 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) {
buf := bytes.Repeat([]byte("1"), badSrcSize)
_, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
// 2. Set invalid range spec on the object (going beyond
// object size)
badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
err = badSrc.SetRange(1, badSrcSize)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Setting NewSourceInfo failed", err).Fatal()
}
// 3. ComposeObject call should fail.
if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
- logger().Fatal("Error was expected.")
+ failureLog(function, args, startTime, "", "ComposeObject expected to fail", err).Fatal()
} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
- logger().Fatal("Got unexpected error: ", err)
+ failureLog(function, args, startTime, "", "Got invalid error", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test expected error cases
func testComposeObjectErrorCasesV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testComposeObjectErrorCasesV2()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV2(
@@ -3286,19 +3891,27 @@ func testComposeObjectErrorCasesV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
testComposeObjectErrorCasesWrapper(c)
}
func testComposeMultipleSources(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "ComposeObject(destination, sources)"
+ args := map[string]interface{}{
+ "destination": "",
+ "sources": "",
+ }
+
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Upload a small source object
@@ -3306,7 +3919,7 @@ func testComposeMultipleSources(c *minio.Client) {
buf := bytes.Repeat([]byte("1"), srcSize)
_, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
// We will append 10 copies of the object.
@@ -3317,31 +3930,38 @@ func testComposeMultipleSources(c *minio.Client) {
// make the last part very small
err = srcs[9].SetRange(0, 0)
if err != nil {
- logger().Fatal("unexpected error:", err)
+ failureLog(function, args, startTime, "", "SetRange failed", err).Fatal()
}
+ args["sources"] = srcs
dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
+ args["destination"] = dst
+
if err != nil {
- logger().Fatal(err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
err = c.ComposeObject(dst, srcs)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
}
objProps, err := c.StatObject(bucketName, "dstObject")
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
}
if objProps.Size != 9*srcSize+1 {
- logger().Fatal("Size mismatched! Expected:", 10000*srcSize, "but got:", objProps.Size)
+ failureLog(function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test concatenating multiple objects objects
func testCompose10KSourcesV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testCompose10KSourcesV2(minioClient)"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV2(
@@ -3351,18 +3971,24 @@ func testCompose10KSourcesV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
testComposeMultipleSources(c)
}
+
func testEncryptedCopyObjectWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testEncryptedCopyObjectWrapper(minioClient)"
+ args := map[string]interface{}{}
+
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
@@ -3377,19 +4003,19 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) {
}
_, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil)
if err != nil {
- logger().Fatal("PutObjectWithSize Error:", err)
+ failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
}
// 2. copy object and change encryption key
src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
err = c.CopyObject(dst, src)
if err != nil {
- logger().Fatal("CopyObject Error:", err)
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
}
// 3. get copied object and check if content is equal
@@ -3400,22 +4026,26 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) {
coreClient := minio.Core{c}
reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH)
if err != nil {
- logger().Fatal("GetObject Error:", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
defer reader.Close()
decBytes, err := ioutil.ReadAll(reader)
if err != nil {
- logger().Fatalln(err)
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
}
if !bytes.Equal(decBytes, buf) {
- logger().Fatal("downloaded object mismatched for encrypted object")
+ failureLog(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test encrypted copy object
func testEncryptedCopyObject() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testEncryptedCopyObject()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV4(
@@ -3425,7 +4055,7 @@ func testEncryptedCopyObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
// c.TraceOn(os.Stderr)
@@ -3434,7 +4064,10 @@ func testEncryptedCopyObject() {
// Test encrypted copy object
func testEncryptedCopyObjectV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testEncryptedCopyObjectV2()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV2(
@@ -3444,13 +4077,17 @@ func testEncryptedCopyObjectV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
}
testEncryptedCopyObjectWrapper(c)
}
+
func testUserMetadataCopying() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testUserMetadataCopying()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV4(
@@ -3460,25 +4097,34 @@ func testUserMetadataCopying() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
// c.TraceOn(os.Stderr)
testUserMetadataCopyingWrapper(c)
}
+
func testUserMetadataCopyingWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{
+ "destination": "",
+ "source": "",
+ }
+
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
fetchMeta := func(object string) (h http.Header) {
objInfo, err := c.StatObject(bucketName, object)
if err != nil {
- logger().Fatal("Metadata fetch error:", err)
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
}
h = make(http.Header)
for k, vs := range objInfo.Metadata {
@@ -3499,10 +4145,10 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
_, err = c.PutObjectWithMetadata(bucketName, "srcObject",
bytes.NewReader(buf), metadata, nil)
if err != nil {
- logger().Fatal("Put Error:", err)
+ failureLog(function, args, startTime, "", "PutObjectWithMetadata failed", err).Fatal()
}
if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
- logger().Fatal("Unequal metadata")
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
}
// 2. create source
@@ -3510,26 +4156,29 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
// 2.1 create destination with metadata set
dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
// 3. Check that copying to an object with metadata set resets
// the headers on the copy.
err = c.CopyObject(dst1, src)
+ args["destination"] = dst1
+ args["source"] = src
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
}
expectedHeaders := make(http.Header)
expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
- logger().Fatal("Unequal metadata")
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
}
// 4. create destination with no metadata set and same source
dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
src = minio.NewSourceInfo(bucketName, "srcObject", nil)
@@ -3537,13 +4186,16 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
// 5. Check that copying to an object with no metadata set,
// copies metadata.
err = c.CopyObject(dst2, src)
+ args["destination"] = dst2
+ args["source"] = src
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
}
expectedHeaders = metadata
if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
- logger().Fatal("Unequal metadata")
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
}
// 6. Compose a pair of sources.
@@ -3553,18 +4205,21 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
}
dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
if err != nil {
- logger().Fatal("Error:", err)
-
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
err = c.ComposeObject(dst3, srcs)
+ function = "ComposeObject(destination, sources)"
+ args["destination"] = dst3
+ args["source"] = srcs
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
}
// Check that no headers are copied in this case
if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
- logger().Fatal("Unequal metadata")
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
}
// 7. Compose a pair of sources with dest user metadata set.
@@ -3574,25 +4229,32 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
}
dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
if err != nil {
- logger().Fatal("Error:", err)
-
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
}
err = c.ComposeObject(dst4, srcs)
+ function = "ComposeObject(destination, sources)"
+ args["destination"] = dst4
+ args["source"] = srcs
+
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
}
// Check that no headers are copied in this case
expectedHeaders = make(http.Header)
expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
- logger().Fatal("Unequal metadata")
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
func testUserMetadataCopyingV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testUserMetadataCopyingV2()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV2(
@@ -3602,7 +4264,7 @@ func testUserMetadataCopyingV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
}
// c.TraceOn(os.Stderr)
@@ -3611,7 +4273,15 @@ func testUserMetadataCopyingV2() {
// Test put object with size -1 byte object.
func testPutObjectNoLengthV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": -1,
+ "metadata": nil,
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3624,7 +4294,7 @@ func testPutObjectNoLengthV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- log.Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -3636,44 +4306,137 @@ func testPutObjectNoLengthV2() {
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
"minio-go-test")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- log.Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
objectName := bucketName + "unique"
+ args["objectName"] = objectName
// Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
// Use different data for each part for multipart tests to ensure part order at the end.
- var buf = getDataBuffer("datafile-65-MB", MinPartSize)
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ defer reader.Close()
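+ // Passing -1 as the size below signals that the object length is not known
+ // up front, so the client must consume the reader until EOF instead of
+ // relying on a pre-computed Content-Length.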
// Upload an object.
- n, err := c.PutObjectWithSize(bucketName, objectName, bytes.NewReader(buf), -1, nil, nil)
+ n, err := c.PutObjectWithSize(bucketName, objectName, reader, -1, nil, nil)
if err != nil {
- log.Fatalf("Error: %v %s %s", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
}
- if n != int64(len(buf)) {
- log.Error(fmt.Errorf("Expected upload object size %d but got %d", len(buf), n))
+ if n != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Expected upload object size "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
}
// Remove the object.
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- log.Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
// Remove the bucket.
err = c.RemoveBucket(bucketName)
if err != nil {
- log.Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectStreaming(bucketName, objectName, reader)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+ "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Issues are revealed by trying to upload multiple files of unknown size
+ // sequentially (on 4GB machines)
+ for i := 1; i <= 4; i++ {
+ // Simulate that we could be receiving byte slices of data that we want
+ // to upload as a file
+ rpipe, wpipe := io.Pipe()
+ defer rpipe.Close()
+ go func() {
+ b := []byte("test")
+ wpipe.Write(b)
+ wpipe.Close()
+ }()
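+
+ // PutObjectStreaming below reads from rpipe until the goroutine above
+ // closes wpipe, so the object size (4 bytes here) is only discovered
+ // while the upload is in progress.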
+
+ // Upload the object.
+ objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+ args["objectName"] = objectName
+
+ n, err := c.PutObjectStreaming(bucketName, objectName, rpipe)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+ }
+ if n != int64(4) {
+ failureLog(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err).Fatal()
+ }
+
+ // Remove the object.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ }
+
+ // Remove the bucket.
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
}
// Test put object with 0 byte object.
func testPutObject0ByteV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": 0,
+ "metadata": nil,
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3686,7 +4449,7 @@ func testPutObject0ByteV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- log.Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
}
// Enable tracing, write to stderr.
@@ -3702,7 +4465,7 @@ func testPutObject0ByteV2() {
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- log.Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
objectName := bucketName + "unique"
@@ -3710,28 +4473,32 @@ func testPutObject0ByteV2() {
// Upload an object.
n, err := c.PutObjectWithSize(bucketName, objectName, bytes.NewReader([]byte("")), 0, nil, nil)
if err != nil {
- log.Fatalf("Error: %v %s %s", err, bucketName, objectName)
+ failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
}
if n != 0 {
- log.Error(fmt.Errorf("Expected upload object size 0 but got %d", n))
+ failureLog(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err).Fatal()
}
// Remove the object.
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- log.Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
// Remove the bucket.
err = c.RemoveBucket(bucketName)
if err != nil {
- log.Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Test expected error cases
func testComposeObjectErrorCases() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testComposeObjectErrorCases()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV4(
@@ -3741,7 +4508,7 @@ func testComposeObjectErrorCases() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
testComposeObjectErrorCasesWrapper(c)
@@ -3749,7 +4516,10 @@ func testComposeObjectErrorCases() {
// Test concatenating 10K objects
func testCompose10KSources() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testCompose10KSources()"
+ args := map[string]interface{}{}
// Instantiate new minio client object
c, err := minio.NewV4(
@@ -3759,7 +4529,7 @@ func testCompose10KSources() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
}
testComposeMultipleSources(c)
@@ -3767,7 +4537,11 @@ func testCompose10KSources() {
// Tests comprehensive list of all methods.
func testFunctionalV2() {
- logger().Info()
+ // initialize logging params
+ startTime := time.Now()
+ function := "testFunctionalV2()"
+ args := map[string]interface{}{}
+
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3778,7 +4552,7 @@ func testFunctionalV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
}
// Enable to debug
@@ -3793,20 +4567,20 @@ func testFunctionalV2() {
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
file, err := os.Create(fileName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "file create failed", err).Fatal()
}
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
_, err = file.Write(buf)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "file write failed", err).Fatal()
}
}
file.Close()
@@ -3815,25 +4589,25 @@ func testFunctionalV2() {
var exists bool
exists, err = c.BucketExists(bucketName)
if err != nil {
- logger().Fatal("Error:", err, bucketName)
+ failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
}
if !exists {
- logger().Fatal("Error: could not find ", bucketName)
+ failureLog(function, args, startTime, "", "Could not find existing bucket "+bucketName, err).Fatal()
}
// Make the bucket 'public read/write'.
err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
}
// List all buckets.
buckets, err := c.ListBuckets()
if len(buckets) == 0 {
- logger().Fatal("Error: list buckets cannot be empty", buckets)
+ failureLog(function, args, startTime, "", "List buckets cannot be empty", err).Fatal()
}
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
}
// Verify if previously created bucket is listed in list buckets.
@@ -3846,7 +4620,7 @@ func testFunctionalV2() {
// If bucket not found error out.
if !bucketFound {
- logger().Fatal("Error: bucket ", bucketName, "not found")
+ failureLog(function, args, startTime, "", "Bucket "+bucketName+"not found", err).Fatal()
}
objectName := bucketName + "unique"
@@ -3856,19 +4630,19 @@ func testFunctionalV2() {
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
if n != int64(len(buf)) {
- logger().Fatal("Error: bad length ", n, len(buf))
+ failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
}
n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
- logger().Fatal("Error:", err, bucketName, objectName+"-nolength")
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
}
if n != int64(len(buf)) {
- logger().Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
}
// Instantiate a done channel to close all listing.
@@ -3884,7 +4658,7 @@ func testFunctionalV2() {
}
}
if !objFound {
- logger().Fatal("Error: object " + objectName + " not found.")
+ failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
}
objFound = false
@@ -3896,7 +4670,7 @@ func testFunctionalV2() {
}
}
if !objFound {
- logger().Fatal("Error: object " + objectName + " not found.")
+ failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
}
incompObjNotFound := true
@@ -3907,47 +4681,66 @@ func testFunctionalV2() {
}
}
if !incompObjNotFound {
- logger().Fatal("Error: unexpected dangling incomplete upload found.")
+ failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
}
newReader, err := c.GetObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
}
if !bytes.Equal(newReadBytes, buf) {
- logger().Fatal("Error: bytes mismatch.")
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
}
err = c.FGetObject(bucketName, objectName, fileName+"-f")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "FgetObject failed", err).Fatal()
}
+ // Generate presigned HEAD object url.
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err := http.Head(presignedHeadURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject URL head request failed", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+ failureLog(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ }
+ if resp.Header.Get("ETag") == "" {
+ failureLog(function, args, startTime, "", "Got empty ETag", err).Fatal()
+ }
+ resp.Body.Close()
+
// Generate presigned GET object url.
presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
}
// Verify if presigned url works.
- resp, err := http.Get(presignedGetURL.String())
+ resp, err = http.Get(presignedGetURL.String())
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
}
if resp.StatusCode != http.StatusOK {
- logger().Fatal("Error: ", resp.Status)
+ failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
}
+ resp.Body.Close()
if !bytes.Equal(newPresignedBytes, buf) {
- logger().Fatal("Error: bytes mismatch.")
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
}
// Set request parameters.
@@ -3956,38 +4749,38 @@ func testFunctionalV2() {
// Generate presigned GET object url.
presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
}
// Verify if presigned url works.
resp, err = http.Get(presignedGetURL.String())
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
}
if resp.StatusCode != http.StatusOK {
- logger().Fatal("Error: ", resp.Status)
+ failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
}
newPresignedBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
}
if !bytes.Equal(newPresignedBytes, buf) {
- logger().Fatal("Error: bytes mismatch for presigned GET url.")
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
}
// Verify content disposition.
if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- logger().Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+ failureLog(function, args, startTime, "", "wrong Content-Disposition received ", err).Fatal()
}
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
}
// Generate data more than 32K
- buf = bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
}
httpClient := &http.Client{
// Setting a sensible time out of 30secs to wait for response
@@ -3998,56 +4791,57 @@ func testFunctionalV2() {
}
resp, err = httpClient.Do(req)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
}
newReader, err = c.GetObject(bucketName, objectName+"-presigned")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
}
if !bytes.Equal(newReadBytes, buf) {
- logger().Fatal("Error: bytes mismatch.")
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-f")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-nolength")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveObject(bucketName, objectName+"-presigned")
if err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err != nil {
- logger().Fatal("Error:", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
}
err = c.RemoveBucket(bucketName)
if err == nil {
- logger().Fatal("Error:")
+ failureLog(function, args, startTime, "", "RemoveBucket should fail as bucket does not exist", err).Fatal()
}
if err.Error() != "The specified bucket does not exist" {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "RemoveBucket failed with wrong error message", err).Fatal()
}
if err = os.Remove(fileName); err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
}
if err = os.Remove(fileName + "-f"); err != nil {
- logger().Fatal("Error: ", err)
+ failureLog(function, args, startTime, "", "File removes failed", err).Fatal()
}
+ successLogger(function, args, startTime).Info()
}
// Convert string to bool and always return false if any error
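For context, the failureLog/successLogger helpers used throughout the hunks above are defined elsewhere in the mint test suite and are not part of this diff. A purely hypothetical sketch of what such helpers could look like, with argument types inferred from the call sites; the field names, logrus import path, and overall layout are assumptions:

package main

import (
	"time"

	log "github.com/sirupsen/logrus" // import path assumed; adjust to the vendored logrus
)

// Hypothetical sketch only: the real failureLog lives elsewhere in the mint suite.
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
	fields := log.Fields{
		"name":     "minio-go",
		"function": function,
		"args":     args,
		"duration": time.Since(startTime).Nanoseconds() / int64(time.Millisecond),
		"status":   "FAIL",
		"alert":    alert,
		"message":  message,
	}
	if err != nil {
		fields["error"] = err.Error()
	}
	return log.WithFields(fields)
}

// Hypothetical sketch of the success counterpart.
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
	return log.WithFields(log.Fields{
		"name":     "minio-go",
		"function": function,
		"args":     args,
		"duration": time.Since(startTime).Nanoseconds() / int64(time.Millisecond),
		"status":   "PASS",
	})
}

func main() {
	start := time.Now()
	args := map[string]interface{}{"bucketName": "my-bucket"}
	successLogger("testExample", args, start).Info()
}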
@@ -4059,17 +4853,16 @@ func mustParseBool(str string) bool {
return b
}
-func logger() *logrus.Entry {
- if pc, file, line, ok := runtime.Caller(1); ok {
- fName := runtime.FuncForPC(pc).Name()
- return log.WithFields(log.Fields{"file": path.Base(file), "function:": fName, "line#": line})
-
- }
- return log.WithFields(nil)
-}
-
func main() {
- logger().Info("Running functional tests for minio-go sdk....")
+ // Output to stdout instead of the default stderr
+ log.SetOutput(os.Stdout)
+ // create custom formatter
+ mintFormatter := mintJSONFormatter{}
+ // set custom formatter
+ log.SetFormatter(&mintFormatter)
+ // log Info or above -- success cases are Info level, failures are Fatal level
+ log.SetLevel(log.InfoLevel)
+ // execute tests
if !isQuickMode() {
testMakeBucketErrorV2()
testGetObjectClosedTwiceV2()
@@ -4086,6 +4879,7 @@ func main() {
testUserMetadataCopyingV2()
testPutObject0ByteV2()
testPutObjectNoLengthV2()
+ testPutObjectsUnknownV2()
testMakeBucketError()
testMakeBucketRegions()
testPutObjectWithMetadata()
@@ -4112,10 +4906,7 @@ func main() {
testGetObjectObjectModified()
testPutObjectUploadSeekedObject()
} else {
- logger().Info("Running short functional tests")
testFunctional()
testFunctionalV2()
}
-
- logger().Info("Functional tests complete for minio-go sdk")
}
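As a side note, the presigned-URL flow exercised by the tests above can be reproduced with minio-go on its own. A minimal sketch, assuming a reachable endpoint and valid credentials; the endpoint, keys, bucket, and object names below are placeholders:

package main

import (
	"bytes"
	"log"
	"net/http"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials; replace with real values.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Presign a PUT URL and upload through plain net/http.
	putURL, err := c.PresignedPutObject("my-bucket", "my-object", time.Hour)
	if err != nil {
		log.Fatalln(err)
	}
	req, err := http.NewRequest("PUT", putURL.String(), bytes.NewReader([]byte("hello")))
	if err != nil {
		log.Fatalln(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	resp.Body.Close()

	// Presign GET and HEAD URLs for the same object.
	getURL, err := c.PresignedGetObject("my-bucket", "my-object", time.Hour, nil)
	if err != nil {
		log.Fatalln(err)
	}
	headURL, err := c.PresignedHeadObject("my-bucket", "my-object", time.Hour, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("GET:", getURL.String(), "HEAD:", headURL.String())
}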
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
index 79bebd99f..bdc8d4e91 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -80,6 +80,9 @@ func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}
+// AmazonS3Host - regular expression used to determine if an arg is s3 host.
+var AmazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")
+
// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonChinaEndpoint(endpointURL) {
@@ -88,7 +91,7 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonGovCloudEndpoint(endpointURL) {
return true
}
- return endpointURL.Host == "s3.amazonaws.com"
+ return AmazonS3Host.MatchString(endpointURL.Host)
}
// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
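For illustration, the new AmazonS3Host pattern accepts the legacy global endpoint as well as dashed and dotted regional endpoints, while rejecting unrelated hosts. A small standalone sketch of the same check (not part of the diff):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the s3utils change above, shown standalone for illustration.
var amazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")

func main() {
	for _, host := range []string{
		"s3.amazonaws.com",              // legacy global endpoint: matches
		"s3-us-west-2.amazonaws.com",    // dashed regional endpoint: matches
		"s3.eu-central-1.amazonaws.com", // dotted regional endpoint: matches
		"storage.googleapis.com",        // not S3: does not match
	} {
		fmt.Println(host, amazonS3Host.MatchString(host))
	}
}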
diff --git a/vendor/github.com/mssola/user_agent/.travis.yml b/vendor/github.com/mssola/user_agent/.travis.yml
index add0c8a6c..96f43d112 100644
--- a/vendor/github.com/mssola/user_agent/.travis.yml
+++ b/vendor/github.com/mssola/user_agent/.travis.yml
@@ -1,9 +1,12 @@
language: go
go:
- - 1.0
- - 1.1
- - 1.2
- - 1.3
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.x
- tip
matrix:
allow_failures:
diff --git a/vendor/github.com/mssola/user_agent/LICENSE b/vendor/github.com/mssola/user_agent/LICENSE
index c02b64473..ede19fbfb 100644
--- a/vendor/github.com/mssola/user_agent/LICENSE
+++ b/vendor/github.com/mssola/user_agent/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2012-2014 Miquel Sabaté Solà
+Copyright (c) 2012-2017 Miquel Sabaté Solà
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/vendor/github.com/mssola/user_agent/README.md b/vendor/github.com/mssola/user_agent/README.md
index 971902560..575977df2 100644
--- a/vendor/github.com/mssola/user_agent/README.md
+++ b/vendor/github.com/mssola/user_agent/README.md
@@ -19,7 +19,7 @@ func main() {
// The "New" function will create a new UserAgent object and it will parse
// the given string. If you need to parse more strings, you can re-use
// this object and call: ua.Parse("another string")
- ua := user_agent.New("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11");
+ ua := user_agent.New("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11")
fmt.Printf("%v\n", ua.Mobile()) // => false
fmt.Printf("%v\n", ua.Bot()) // => false
@@ -48,4 +48,4 @@ func main() {
}
~~~
-Copyright &copy; 2012-2014 Miquel Sabaté Solà, released under the MIT License.
+Copyright &copy; 2012-2017 Miquel Sabaté Solà, released under the MIT License.
diff --git a/vendor/github.com/mssola/user_agent/all_test.go b/vendor/github.com/mssola/user_agent/all_test.go
index 4f3c03198..fb0cf054e 100644
--- a/vendor/github.com/mssola/user_agent/all_test.go
+++ b/vendor/github.com/mssola/user_agent/all_test.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2012-2014 Miquel Sabaté Solà <mikisabate@gmail.com>
+// Copyright (C) 2012-2017 Miquel Sabaté Solà <mikisabate@gmail.com>
// This file is licensed under the MIT license.
// See the LICENSE file.
@@ -6,189 +6,514 @@ package user_agent
import (
"fmt"
+ "reflect"
"testing"
)
// Slice that contains all the tests. Each test is contained in a struct
-// that groups the name of the test and the User-Agent string to be tested.
+// that groups the title of the test, the User-Agent string to be tested and the expected value.
var uastrings = []struct {
- name string
- ua string
+ title string
+ ua string
+ expected string
+ expectedOS *OSInfo
}{
// Bots
- {"GoogleBot", "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"},
- {"GoogleBotSmartphone", "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"},
- {"BingBot", "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"},
- {"BaiduBot", "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"},
- {"Twitterbot", "Twitterbot"},
- {"YahooBot", "Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)"},
- {"FacebookExternalHit", "facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)"},
- {"FacebookPlatform", "facebookplatform/1.0 (+http://developers.facebook.com)"},
- {"FaceBot", "Facebot"},
+ {
+ title: "GoogleBot",
+ ua: "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
+ expected: "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:false",
+ },
+ {
+ title: "GoogleBotSmartphone",
+ ua: "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
+ expected: "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:true",
+ },
+ {
+ title: "BingBot",
+ ua: "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)",
+ expected: "Mozilla:5.0 Browser:bingbot-2.0 Bot:true Mobile:false",
+ },
+ {
+ title: "BaiduBot",
+ ua: "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)",
+ expected: "Mozilla:5.0 Browser:Baiduspider-2.0 Bot:true Mobile:false",
+ },
+ {
+ title: "Twitterbot",
+ ua: "Twitterbot",
+ expected: "Browser:Twitterbot Bot:true Mobile:false",
+ },
+ {
+ title: "YahooBot",
+ ua: "Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
+ expected: "Mozilla:5.0 Browser:Yahoo! Slurp Bot:true Mobile:false",
+ },
+ {
+ title: "FacebookExternalHit",
+ ua: "facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)",
+ expected: "Browser:facebookexternalhit-1.1 Bot:true Mobile:false",
+ },
+ {
+ title: "FacebookPlatform",
+ ua: "facebookplatform/1.0 (+http://developers.facebook.com)",
+ expected: "Browser:facebookplatform-1.0 Bot:true Mobile:false",
+ },
+ {
+ title: "FaceBot",
+ ua: "Facebot",
+ expected: "Browser:Facebot Bot:true Mobile:false",
+ },
+ {
+ title: "NutchCVS",
+ ua: "NutchCVS/0.8-dev (Nutch; http://lucene.apache.org/nutch/bot.html; nutch-agent@lucene.apache.org)",
+ expected: "Browser:NutchCVS Bot:true Mobile:false",
+ },
+ {
+ title: "MJ12bot",
+ ua: "Mozilla/5.0 (compatible; MJ12bot/v1.2.4; http://www.majestic12.co.uk/bot.php?+)",
+ expected: "Mozilla:5.0 Browser:MJ12bot-v1.2.4 Bot:true Mobile:false",
+ },
+ {
+ title: "MJ12bot",
+ ua: "MJ12bot/v1.0.8 (http://majestic12.co.uk/bot.php?+)",
+ expected: "Browser:MJ12bot Bot:true Mobile:false",
+ },
+ {
+ title: "AhrefsBot",
+ ua: "Mozilla/5.0 (compatible; AhrefsBot/4.0; +http://ahrefs.com/robot/)",
+ expected: "Mozilla:5.0 Browser:AhrefsBot-4.0 Bot:true Mobile:false",
+ },
// Internet Explorer
- {"IE10", "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)"},
- {"Tablet", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.2; ARM; Trident/6.0; Touch; .NET4.0E; .NET4.0C; Tablet PC 2.0)"},
- {"Touch", "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0; Touch)"},
- {"Phone", "Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; SGH-i917)"},
- {"IE6", "Mozilla/4.0 (compatible; MSIE6.0; Windows NT 5.0; .NET CLR 1.1.4322)"},
- {"IE8Compatibility", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)"},
- {"IE10Compatibility", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)"},
- {"IE11Win81", "Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"},
- {"IE11Win7", "Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko"},
- {"IE11b32Win7b64", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"},
- {"IE11b32Win7b64MDDRJS", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; MDDRJS; rv:11.0) like Gecko"},
- {"IE11Compatibility", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0)"},
+ {
+ title: "IE10",
+ ua: "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows 8", "Windows", "8"},
+ },
+ {
+ title: "Tablet",
+ ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.2; ARM; Trident/6.0; Touch; .NET4.0E; .NET4.0C; Tablet PC 2.0)",
+ expected: "Mozilla:4.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
+ },
+ {
+ title: "Touch",
+ ua: "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0; Touch)",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
+ },
+ {
+ title: "Phone",
+ ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; SGH-i917)",
+ expected: "Mozilla:4.0 Platform:Windows OS:Windows Phone OS 7.0 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:true",
+ expectedOS: &OSInfo{"Windows Phone OS 7.0", "Windows Phone OS", "7.0"},
+ },
+ {
+ title: "IE6",
+ ua: "Mozilla/4.0 (compatible; MSIE6.0; Windows NT 5.0; .NET CLR 1.1.4322)",
+ expected: "Mozilla:4.0 Platform:Windows OS:Windows 2000 Browser:Internet Explorer-6.0 Engine:Trident Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows 2000", "Windows", "2000"},
+ },
+ {
+ title: "IE8Compatibility",
+ ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)",
+ expected: "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-8.0 Engine:Trident Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows 7", "Windows", "7"},
+ },
+ {
+ title: "IE10Compatibility",
+ ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)",
+ expected: "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
+ },
+ {
+ title: "IE11Win81",
+ ua: "Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows 8.1", "Windows", "8.1"},
+ },
+ {
+ title: "IE11Win7",
+ ua: "Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
+ },
+ {
+ title: "IE11b32Win7b64",
+ ua: "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
+ },
+ {
+ title: "IE11b32Win7b64MDDRJS",
+ ua: "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; MDDRJS; rv:11.0) like Gecko",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
+ },
+ {
+ title: "IE11Compatibility",
+ ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0)",
+ expected: "Mozilla:4.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:false",
+ },
- // Gecko
- {"FirefoxMac", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b8) Gecko/20100101 Firefox/4.0b8"},
- {"FirefoxMacLoc", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13"},
- {"FirefoxLinux", "Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0"},
- {"FirefoxWin", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14"},
- {"Firefox29Win7", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0"},
- {"CaminoMac", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.14) Gecko/20080409 Camino/1.6 (like Firefox/2.0.0.14)"},
- {"Iceweasel", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Iceweasel/2.0 (Debian-2.0+dfsg-1)"},
- {"SeaMonkey", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0"},
- {"AndroidFirefox", "Mozilla/5.0 (Android; Mobile; rv:17.0) Gecko/17.0 Firefox/17.0"},
- {"AndroidFirefoxTablet", "Mozilla/5.0 (Android; Tablet; rv:26.0) Gecko/26.0 Firefox/26.0"},
- {"FirefoxOS", "Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0"},
- {"FirefoxOSTablet", "Mozilla/5.0 (Tablet; rv:26.0) Gecko/26.0 Firefox/26.0"},
- {"FirefoxWinXP", "Mozilla/5.0 (Windows NT 5.2; rv:31.0) Gecko/20100101 Firefox/31.0"},
- {"FirefoxMRA", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:24.0) Gecko/20130405 MRA 5.5 (build 02842) Firefox/24.0 (.NET CLR 3.5.30729)"},
-
- // Opera
- {"OperaMac", "Opera/9.27 (Macintosh; Intel Mac OS X; U; en)"},
- {"OperaWin", "Opera/9.27 (Windows NT 5.1; U; en)"},
- {"OperaWinNoLocale", "Opera/9.80 (Windows NT 5.1) Presto/2.12.388 Version/12.10"},
- {"OperaWin2Comment", "Opera/9.80 (Windows NT 6.0; WOW64) Presto/2.12.388 Version/12.15"},
- {"OperaMinimal", "Opera/9.80"},
- {"OperaFull", "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.2.15 Version/10.10"},
- {"OperaLinux", "Opera/9.80 (X11; Linux x86_64) Presto/2.12.388 Version/12.10"},
- {"OperaAndroid", "Opera/9.80 (Android 4.2.1; Linux; Opera Mobi/ADR-1212030829) Presto/2.11.355 Version/12.10"},
- {"OperaNested", "Opera/9.80 (Windows NT 5.1; MRA 6.0 (build 5831)) Presto/2.12.388 Version/12.10"},
- {"OperaMRA", "Opera/9.80 (Windows NT 6.1; U; MRA 5.8 (build 4139); en) Presto/2.9.168 Version/11.50"},
-
- // Other
- {"Empty", ""},
- {"Nil", "nil"},
- {"Compatible", "Mozilla/4.0 (compatible)"},
- {"Mozilla", "Mozilla/5.0"},
- {"Amaya", "amaya/9.51 libwww/5.4.0"},
- {"Rails", "Rails Testing"},
- {"Python", "Python-urllib/2.7"},
- {"Curl", "curl/7.28.1"},
-
- // WebKit
- {"ChromeLinux", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11"},
- {"ChromeWin7", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19"},
- {"ChromeMinimal", "Mozilla/5.0 AppleWebKit/534.10 Chrome/8.0.552.215 Safari/534.10"},
- {"ChromeMac", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.231 Safari/534.10"},
- {"SafariMac", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16"},
- {"SafariWin", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en) AppleWebKit/526.9 (KHTML, like Gecko) Version/4.0dp1 Safari/526.8"},
- {"iPhone7", "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_3 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B511 Safari/9537.53"},
- {"iPhone", "Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419"},
- {"iPod", "Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419"},
- {"iPad", "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 Safari/531.21.10"},
- {"webOS", "Mozilla/5.0 (webOS/1.4.0; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.1"},
- {"Android", "Mozilla/5.0 (Linux; U; Android 1.5; de-; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"},
- {"BlackBerry", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1+"},
- {"BB10", "Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+"},
- {"Ericsson", "Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525"},
- {"ChromeAndroid", "Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19"},
- {"WebkitNoPlatform", "Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13"},
- {"OperaWebkitMobile", "Mozilla/5.0 (Linux; Android 4.2.2; Galaxy Nexus Build/JDQ39) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Mobile Safari/537.31 OPR/14.0.1074.57453"},
- {"OperaWebkitDesktop", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Safari/537.31 OPR/14.0.1074.57453"},
- {"ChromeNothingAfterU", "Mozilla/5.0 (Linux; U) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.79 Safari/537.4"},
- {"SafariOnSymbian", "Mozilla/5.0 (SymbianOS/9.1; U; [en-us]) AppleWebKit/413 (KHTML, like Gecko) Safari/413"},
-}
-
-// Slice of the expected results from the previous slice.
-var expected = []string{
- // Bots
- "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:false",
- "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:true",
- "Mozilla:5.0 Browser:bingbot-2.0 Bot:true Mobile:false",
- "Mozilla:5.0 Browser:Baiduspider-2.0 Bot:true Mobile:false",
- "Browser:Twitterbot Bot:true Mobile:false",
- "Mozilla:5.0 Browser:Yahoo! Slurp Bot:true Mobile:false",
- "Browser:facebookexternalhit-1.1 Bot:true Mobile:false",
- "Browser:facebookplatform-1.0 Bot:true Mobile:false",
- "Browser:Facebot Bot:true Mobile:false",
-
- // Internet Explorer
- "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:4.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:4.0 Platform:Windows OS:Windows Phone OS 7.0 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:true",
- "Mozilla:4.0 Platform:Windows OS:Windows 2000 Browser:Internet Explorer-6.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-8.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
- "Mozilla:4.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:false",
+ // Microsoft Edge
+ {
+ title: "EdgeDesktop",
+ ua: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 10 Browser:Edge-12.10240 Engine:EdgeHTML Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows 10", "Windows", "10"},
+ },
+ {
+ title: "EdgeMobile",
+ ua: "Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; DEVICE INFO) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10240",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows Phone 10.0 Browser:Edge-12.10240 Engine:EdgeHTML Bot:false Mobile:true",
+ },
// Gecko
- "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Browser:Firefox-4.0b8 Engine:Gecko-20100101 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:Firefox-3.6.13 Engine:Gecko-20101203 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Firefox-17.0 Engine:Gecko-20100101 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-2.0.0.14 Engine:Gecko-20080404 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Firefox-29.0 Engine:Gecko-20100101 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Camino-1.6 Engine:Gecko-20080409 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:X11 OS:Linux i686 Localization:en-US Browser:Iceweasel-2.0 Engine:Gecko-20061024 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:SeaMonkey-2.0 Engine:Gecko-20091017 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Mobile OS:Android Browser:Firefox-17.0 Engine:Gecko-17.0 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Tablet OS:Android Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Mobile OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Tablet OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Windows OS:Windows XP x64 Edition Browser:Firefox-31.0 Engine:Gecko-20100101 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-24.0 Engine:Gecko-20130405 Bot:false Mobile:false",
+ {
+ title: "FirefoxMac",
+ ua: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b8) Gecko/20100101 Firefox/4.0b8",
+ expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Browser:Firefox-4.0b8 Engine:Gecko-20100101 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Intel Mac OS X 10.6", "Mac OS X", "10.6"},
+ },
+ {
+ title: "FirefoxMacLoc",
+ ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
+ expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:Firefox-3.6.13 Engine:Gecko-20101203 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Intel Mac OS X 10.6", "Mac OS X", "10.6"},
+ },
+ {
+ title: "FirefoxLinux",
+ ua: "Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Firefox-17.0 Engine:Gecko-20100101 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Linux x86_64", "Linux", ""},
+ },
+ {
+ title: "FirefoxLinux - Ubuntu V50",
+ ua: "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0",
+ expected: "Mozilla:5.0 Platform:X11 OS:Ubuntu Browser:Firefox-50.0 Engine:Gecko-20100101 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Ubuntu", "Ubuntu", ""},
+ },
+ {
+ title: "FirefoxWin",
+ ua: "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-2.0.0.14 Engine:Gecko-20080404 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows XP", "Windows", "XP"},
+ },
+ {
+ title: "Firefox29Win7",
+ ua: "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Firefox-29.0 Engine:Gecko-20100101 Bot:false Mobile:false",
+ },
+ {
+ title: "CaminoMac",
+ ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.14) Gecko/20080409 Camino/1.6 (like Firefox/2.0.0.14)",
+ expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Camino-1.6 Engine:Gecko-20080409 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Intel Mac OS X", "Mac OS X", ""},
+ },
+ {
+ title: "Iceweasel",
+ ua: "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Iceweasel/2.0 (Debian-2.0+dfsg-1)",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux i686 Localization:en-US Browser:Iceweasel-2.0 Engine:Gecko-20061024 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Linux i686", "Linux", ""},
+ },
+ {
+ title: "SeaMonkey",
+ ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0",
+ expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:SeaMonkey-2.0 Engine:Gecko-20091017 Bot:false Mobile:false",
+ },
+ {
+ title: "AndroidFirefox",
+ ua: "Mozilla/5.0 (Android; Mobile; rv:17.0) Gecko/17.0 Firefox/17.0",
+ expected: "Mozilla:5.0 Platform:Mobile OS:Android Browser:Firefox-17.0 Engine:Gecko-17.0 Bot:false Mobile:true",
+ },
+ {
+ title: "AndroidFirefoxTablet",
+ ua: "Mozilla/5.0 (Android; Tablet; rv:26.0) Gecko/26.0 Firefox/26.0",
+ expected: "Mozilla:5.0 Platform:Tablet OS:Android Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
+ expectedOS: &OSInfo{"Android", "Android", ""},
+ },
+ {
+ title: "FirefoxOS",
+ ua: "Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0",
+ expected: "Mozilla:5.0 Platform:Mobile OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
+ expectedOS: &OSInfo{"FirefoxOS", "FirefoxOS", ""},
+ },
+ {
+ title: "FirefoxOSTablet",
+ ua: "Mozilla/5.0 (Tablet; rv:26.0) Gecko/26.0 Firefox/26.0",
+ expected: "Mozilla:5.0 Platform:Tablet OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
+ },
+ {
+ title: "FirefoxWinXP",
+ ua: "Mozilla/5.0 (Windows NT 5.2; rv:31.0) Gecko/20100101 Firefox/31.0",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows XP x64 Edition Browser:Firefox-31.0 Engine:Gecko-20100101 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows XP x64 Edition", "Windows", "XP"},
+ },
+ {
+ title: "FirefoxMRA",
+ ua: "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:24.0) Gecko/20130405 MRA 5.5 (build 02842) Firefox/24.0 (.NET CLR 3.5.30729)",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-24.0 Engine:Gecko-20130405 Bot:false Mobile:false",
+ },
// Opera
- "Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false",
- "Platform:Windows OS:Windows XP Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false",
- "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
- "Platform:Windows OS:Windows Vista Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
- "Browser:Opera-9.80 Engine:Presto Bot:false Mobile:false",
- "Platform:Windows OS:Windows Vista Localization:en Browser:Opera-9.80 Engine:Presto-2.2.15 Bot:false Mobile:false",
- "Platform:X11 OS:Linux x86_64 Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
- "Platform:Android 4.2.1 OS:Linux Browser:Opera-9.80 Engine:Presto-2.11.355 Bot:false Mobile:true",
- "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
- "Platform:Windows OS:Windows 7 Localization:en Browser:Opera-9.80 Engine:Presto-2.9.168 Bot:false Mobile:false",
+ {
+ title: "OperaMac",
+ ua: "Opera/9.27 (Macintosh; Intel Mac OS X; U; en)",
+ expected: "Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Intel Mac OS X", "Mac OS X", ""},
+ },
+ {
+ title: "OperaWin",
+ ua: "Opera/9.27 (Windows NT 5.1; U; en)",
+ expected: "Platform:Windows OS:Windows XP Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false",
+ },
+ {
+ title: "OperaWinNoLocale",
+ ua: "Opera/9.80 (Windows NT 5.1) Presto/2.12.388 Version/12.10",
+ expected: "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
+ },
+ {
+ title: "OperaWin2Comment",
+ ua: "Opera/9.80 (Windows NT 6.0; WOW64) Presto/2.12.388 Version/12.15",
+ expected: "Platform:Windows OS:Windows Vista Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Windows Vista", "Windows", "Vista"},
+ },
+ {
+ title: "OperaMinimal",
+ ua: "Opera/9.80",
+ expected: "Browser:Opera-9.80 Engine:Presto Bot:false Mobile:false",
+ },
+ {
+ title: "OperaFull",
+ ua: "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.2.15 Version/10.10",
+ expected: "Platform:Windows OS:Windows Vista Localization:en Browser:Opera-9.80 Engine:Presto-2.2.15 Bot:false Mobile:false",
+ },
+ {
+ title: "OperaLinux",
+ ua: "Opera/9.80 (X11; Linux x86_64) Presto/2.12.388 Version/12.10",
+ expected: "Platform:X11 OS:Linux x86_64 Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
+ },
+ {
+ title: "OperaLinux - Ubuntu V41",
+ ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36 OPR/41.0.2353.69",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Opera-41.0.2353.69 Engine:AppleWebKit-537.36 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Linux x86_64", "Linux", ""},
+ },
+ {
+ title: "OperaAndroid",
+ ua: "Opera/9.80 (Android 4.2.1; Linux; Opera Mobi/ADR-1212030829) Presto/2.11.355 Version/12.10",
+ expected: "Platform:Android 4.2.1 OS:Linux Browser:Opera-9.80 Engine:Presto-2.11.355 Bot:false Mobile:true",
+ expectedOS: &OSInfo{"Linux", "Linux", ""},
+ },
+ {
+ title: "OperaNested",
+ ua: "Opera/9.80 (Windows NT 5.1; MRA 6.0 (build 5831)) Presto/2.12.388 Version/12.10",
+ expected: "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
+ },
+ {
+ title: "OperaMRA",
+ ua: "Opera/9.80 (Windows NT 6.1; U; MRA 5.8 (build 4139); en) Presto/2.9.168 Version/11.50",
+ expected: "Platform:Windows OS:Windows 7 Localization:en Browser:Opera-9.80 Engine:Presto-2.9.168 Bot:false Mobile:false",
+ },
// Other
- "Bot:false Mobile:false",
- "Browser:nil Bot:false Mobile:false",
- "Browser:Mozilla-4.0 Bot:false Mobile:false",
- "Browser:Mozilla-5.0 Bot:false Mobile:false",
- "Browser:amaya-9.51 Engine:libwww-5.4.0 Bot:false Mobile:false",
- "Browser:Rails Engine:Testing Bot:false Mobile:false",
- "Browser:Python-urllib-2.7 Bot:false Mobile:false",
- "Browser:curl-7.28.1 Bot:false Mobile:false",
+ {
+ title: "Empty",
+ ua: "",
+ expected: "Bot:false Mobile:false",
+ },
+ {
+ title: "Nil",
+ ua: "nil",
+ expected: "Browser:nil Bot:false Mobile:false",
+ },
+ {
+ title: "Compatible",
+ ua: "Mozilla/4.0 (compatible)",
+ expected: "Browser:Mozilla-4.0 Bot:false Mobile:false",
+ },
+ {
+ title: "Mozilla",
+ ua: "Mozilla/5.0",
+ expected: "Browser:Mozilla-5.0 Bot:false Mobile:false",
+ },
+ {
+ title: "Amaya",
+ ua: "amaya/9.51 libwww/5.4.0",
+ expected: "Browser:amaya-9.51 Engine:libwww-5.4.0 Bot:false Mobile:false",
+ },
+ {
+ title: "Rails",
+ ua: "Rails Testing",
+ expected: "Browser:Rails Engine:Testing Bot:false Mobile:false",
+ },
+ {
+ title: "Python",
+ ua: "Python-urllib/2.7",
+ expected: "Browser:Python-urllib-2.7 Bot:false Mobile:false",
+ },
+ {
+ title: "Curl",
+ ua: "curl/7.28.1",
+ expected: "Browser:curl-7.28.1 Bot:false Mobile:false",
+ },
// WebKit
- "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chrome-23.0.1271.97 Engine:AppleWebKit-537.11 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Chrome-18.0.1025.168 Engine:AppleWebKit-535.19 Bot:false Mobile:false",
- "Mozilla:5.0 Browser:Chrome-8.0.552.215 Engine:AppleWebKit-534.10 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_5 Localization:en-US Browser:Chrome-8.0.552.231 Engine:AppleWebKit-534.10 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_3 Localization:en-us Browser:Safari-5.0 Engine:AppleWebKit-533.16 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en Browser:Safari-4.0dp1 Engine:AppleWebKit-526.9 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:iPhone OS:CPU iPhone OS 7_0_3 like Mac OS X Browser:Safari-7.0 Engine:AppleWebKit-537.51.1 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:iPhone OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:iPod OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:iPad OS:CPU OS 3_2 like Mac OS X Localization:en-us Browser:Safari-4.0.4 Engine:AppleWebKit-531.21.10 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:webOS OS:Palm Localization:en-US Browser:webOS-1.0 Engine:AppleWebKit-532.2 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Linux OS:Android 1.5 Localization:de- Browser:Android-3.1.2 Engine:AppleWebKit-528.5+ Bot:false Mobile:true",
- "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry 9800 Localization:en Browser:BlackBerry-6.0.0.141 Engine:AppleWebKit-534.1+ Bot:false Mobile:true",
- "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry Browser:BlackBerry-10.0.9.388 Engine:AppleWebKit-537.3+ Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.4 Browser:Symbian-3.0 Engine:AppleWebKit-525 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:Linux OS:Android 4.2.1 Browser:Chrome-18.0.1025.166 Engine:AppleWebKit-535.19 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:en-us Localization:en-us Browser:Safari-3.1 Engine:AppleWebKit-525.13 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Linux OS:Android 4.2.2 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:true",
- "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Linux OS:Linux Browser:Chrome-22.0.1229.79 Engine:AppleWebKit-537.4 Bot:false Mobile:false",
- "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.1 Browser:Symbian-413 Engine:AppleWebKit-413 Bot:false Mobile:true",
+ {
+ title: "ChromeLinux",
+ ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chrome-23.0.1271.97 Engine:AppleWebKit-537.11 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Linux x86_64", "Linux", ""},
+ },
+ {
+ title: "ChromeLinux - Ubuntu V55",
+ ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chrome-55.0.2883.75 Engine:AppleWebKit-537.36 Bot:false Mobile:false",
+ },
+ {
+ title: "ChromeWin7",
+ ua: "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Chrome-18.0.1025.168 Engine:AppleWebKit-535.19 Bot:false Mobile:false",
+ },
+ {
+ title: "ChromeMinimal",
+ ua: "Mozilla/5.0 AppleWebKit/534.10 Chrome/8.0.552.215 Safari/534.10",
+ expected: "Mozilla:5.0 Browser:Chrome-8.0.552.215 Engine:AppleWebKit-534.10 Bot:false Mobile:false",
+ },
+ {
+ title: "ChromeMac",
+ ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.231 Safari/534.10",
+ expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_5 Localization:en-US Browser:Chrome-8.0.552.231 Engine:AppleWebKit-534.10 Bot:false Mobile:false",
+ expectedOS: &OSInfo{"Intel Mac OS X 10_6_5", "Mac OS X", "10.6.5"},
+ },
+ {
+ title: "SafariMac",
+ ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16",
+ expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_3 Localization:en-us Browser:Safari-5.0 Engine:AppleWebKit-533.16 Bot:false Mobile:false",
+ },
+ {
+ title: "SafariWin",
+ ua: "Mozilla/5.0 (Windows; U; Windows NT 5.1; en) AppleWebKit/526.9 (KHTML, like Gecko) Version/4.0dp1 Safari/526.8",
+ expected: "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en Browser:Safari-4.0dp1 Engine:AppleWebKit-526.9 Bot:false Mobile:false",
+ },
+ {
+ title: "iPhone7",
+ ua: "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_3 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B511 Safari/9537.53",
+ expected: "Mozilla:5.0 Platform:iPhone OS:CPU iPhone OS 7_0_3 like Mac OS X Browser:Safari-7.0 Engine:AppleWebKit-537.51.1 Bot:false Mobile:true",
+ expectedOS: &OSInfo{"CPU iPhone OS 7_0_3 like Mac OS X", "iPhone OS", "7.0.3"},
+ },
+ {
+ title: "iPhone",
+ ua: "Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419",
+ expected: "Mozilla:5.0 Platform:iPhone OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true",
+ },
+ {
+ title: "iPod",
+ ua: "Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419",
+ expected: "Mozilla:5.0 Platform:iPod OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true",
+ },
+ {
+ title: "iPad",
+ ua: "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 Safari/531.21.10",
+ expected: "Mozilla:5.0 Platform:iPad OS:CPU OS 3_2 like Mac OS X Localization:en-us Browser:Safari-4.0.4 Engine:AppleWebKit-531.21.10 Bot:false Mobile:true",
+ },
+ {
+ title: "webOS",
+ ua: "Mozilla/5.0 (webOS/1.4.0; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.1",
+ expected: "Mozilla:5.0 Platform:webOS OS:Palm Localization:en-US Browser:webOS-1.0 Engine:AppleWebKit-532.2 Bot:false Mobile:true",
+ },
+ {
+ title: "Android",
+ ua: "Mozilla/5.0 (Linux; U; Android 1.5; de-; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 1.5 Localization:de- Browser:Android-3.1.2 Engine:AppleWebKit-528.5+ Bot:false Mobile:true",
+ },
+ {
+ title: "BlackBerry",
+ ua: "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1+",
+ expected: "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry 9800 Localization:en Browser:BlackBerry-6.0.0.141 Engine:AppleWebKit-534.1+ Bot:false Mobile:true",
+ expectedOS: &OSInfo{"BlackBerry 9800", "BlackBerry", "9800"},
+ },
+ {
+ title: "BB10",
+ ua: "Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+",
+ expected: "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry Browser:BlackBerry-10.0.9.388 Engine:AppleWebKit-537.3+ Bot:false Mobile:true",
+ },
+ {
+ title: "Ericsson",
+ ua: "Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525",
+ expected: "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.4 Browser:Symbian-3.0 Engine:AppleWebKit-525 Bot:false Mobile:true",
+ expectedOS: &OSInfo{"SymbianOS/9.4", "SymbianOS", "9.4"},
+ },
+ {
+ title: "ChromeAndroid",
+ ua: "Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 4.2.1 Browser:Chrome-18.0.1025.166 Engine:AppleWebKit-535.19 Bot:false Mobile:true",
+ },
+ {
+ title: "WebkitNoPlatform",
+ ua: "Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13",
+ expected: "Mozilla:5.0 Platform:en-us Localization:en-us Browser:Safari-3.1 Engine:AppleWebKit-525.13 Bot:false Mobile:false",
+ },
+ {
+ title: "OperaWebkitMobile",
+ ua: "Mozilla/5.0 (Linux; Android 4.2.2; Galaxy Nexus Build/JDQ39) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Mobile Safari/537.31 OPR/14.0.1074.57453",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 4.2.2 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:true",
+ },
+ {
+ title: "OperaWebkitDesktop",
+ ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Safari/537.31 OPR/14.0.1074.57453",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:false",
+ },
+ {
+ title: "ChromeNothingAfterU",
+ ua: "Mozilla/5.0 (Linux; U) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.79 Safari/537.4",
+ expected: "Mozilla:5.0 Platform:Linux OS:Linux Browser:Chrome-22.0.1229.79 Engine:AppleWebKit-537.4 Bot:false Mobile:false",
+ },
+ {
+ title: "SafariOnSymbian",
+ ua: "Mozilla/5.0 (SymbianOS/9.1; U; [en-us]) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
+ expected: "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.1 Browser:Symbian-413 Engine:AppleWebKit-413 Bot:false Mobile:true",
+ },
+ {
+ title: "Chromium - Ubuntu V49",
+ ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chromium-49.0.2623.108 Engine:AppleWebKit-537.36 Bot:false Mobile:false",
+ },
+ {
+ title: "Chromium - Ubuntu V55",
+ ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/53.0.2785.143 Chrome/53.0.2785.143 Safari/537.36",
+ expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chromium-53.0.2785.143 Engine:AppleWebKit-537.36 Bot:false Mobile:false",
+ },
+
+ // Dalvik
+ {
+ title: "Dalvik - Dell:001DL",
+ ua: "Dalvik/1.2.0 (Linux; U; Android 2.2.2; 001DL Build/FRG83G)",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 2.2.2 Bot:false Mobile:true",
+ },
+ {
+ title: "Dalvik - HTC:001HT",
+ ua: "Dalvik/1.4.0 (Linux; U; Android 2.3.3; 001HT Build/GRI40)",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 2.3.3 Bot:false Mobile:true",
+ },
+ {
+ title: "Dalvik - ZTE:009Z",
+ ua: "Dalvik/1.4.0 (Linux; U; Android 2.3.4; 009Z Build/GINGERBREAD)",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 2.3.4 Bot:false Mobile:true",
+ },
+ {
+ title: "Dalvik - A850",
+ ua: "Dalvik/1.6.0 (Linux; U; Android 4.2.2; A850 Build/JDQ39) Configuration/CLDC-1.1; Opera Mini/att/4.2",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 4.2.2 Bot:false Mobile:true",
+ },
+ {
+ title: "Dalvik - Asus:T00Q",
+ ua: "Dalvik/1.6.0 (Linux; U; Android 4.4.2; ASUS_T00Q Build/KVT49L)/CLDC-1.1",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 4.4.2 Bot:false Mobile:true",
+ expectedOS: &OSInfo{"Android 4.4.2", "Android", "4.4.2"},
+ },
+ {
+ title: "Dalvik - W2430",
+ ua: "Dalvik/1.6.0 (Linux; U; Android 4.0.4; W2430 Build/IMM76D)014; Profile/MIDP-2.1 Configuration/CLDC-1",
+ expected: "Mozilla:5.0 Platform:Linux OS:Android 4.0.4 Bot:false Mobile:true",
+ },
}
// Internal: beautify the UserAgent reference into a string so it can be
@@ -235,11 +560,18 @@ func beautify(ua *UserAgent) (s string) {
// The test suite.
func TestUserAgent(t *testing.T) {
- for i, tt := range uastrings {
+ for _, tt := range uastrings {
ua := New(tt.ua)
got := beautify(ua)
- if expected[i] != got {
- t.Errorf("Test %v => %q, expected %q", tt.name, got, expected[i])
+ if tt.expected != got {
+ t.Errorf("\nTest %v\ngot: %q\nexpected %q\n", tt.title, got, tt.expected)
+ }
+
+ if tt.expectedOS != nil {
+ gotOSInfo := ua.OSInfo()
+ if !reflect.DeepEqual(tt.expectedOS, &gotOSInfo) {
+ t.Errorf("\nTest %v\ngot: %#v\nexpected %#v\n", tt.title, gotOSInfo, tt.expectedOS)
+ }
}
}
}
diff --git a/vendor/github.com/mssola/user_agent/bot.go b/vendor/github.com/mssola/user_agent/bot.go
index efcab9253..a6222d17f 100644
--- a/vendor/github.com/mssola/user_agent/bot.go
+++ b/vendor/github.com/mssola/user_agent/bot.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2014 Miquel Sabaté Solà <mikisabate@gmail.com>
+// Copyright (C) 2014-2017 Miquel Sabaté Solà <mikisabate@gmail.com>
// This file is licensed under the MIT license.
// See the LICENSE file.
@@ -9,6 +9,8 @@ import (
"strings"
)
+var botFromSiteRegexp = regexp.MustCompile("http://.+\\.\\w+")
+
// Get the name of the bot from the website that may be in the given comment. If
// there is no website in the comment, then an empty string is returned.
func getFromSite(comment []string) string {
@@ -23,8 +25,7 @@ func getFromSite(comment []string) string {
}
// Pick the site.
- re := regexp.MustCompile("http://.+\\.\\w+")
- results := re.FindStringSubmatch(comment[idx])
+ results := botFromSiteRegexp.FindStringSubmatch(comment[idx])
if len(results) == 1 {
// If it's a simple comment, just return the name of the site.
if idx == 0 {
@@ -74,6 +75,8 @@ func (p *UserAgent) fixOther(sections []section) {
}
}
+var botRegex = regexp.MustCompile("(?i)(bot|crawler|sp(i|y)der|search|worm|fetch|nutch)")
+
// Check if we're dealing with a bot or with some weird browser. If that is the
// case, the receiver will be modified accordingly.
func (p *UserAgent) checkBot(sections []section) {
@@ -82,9 +85,8 @@ func (p *UserAgent) checkBot(sections []section) {
if len(sections) == 1 && sections[0].name != "Mozilla" {
p.mozilla = ""
- // Check whether the name has some suspicious "bot" in his name.
- reg, _ := regexp.Compile("(?i)bot")
- if reg.Match([]byte(sections[0].name)) {
+ // Check whether the name has some suspicious "bot" or "crawler" in its name.
+ if botRegex.Match([]byte(sections[0].name)) {
p.setSimple(sections[0].name, "", true)
return
}
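The bot.go changes above follow one pattern: regular expressions are compiled once into package-level variables instead of on every call, and the bot pattern is broadened beyond a plain "bot" match. A minimal standalone sketch of that pattern:

package main

import (
	"fmt"
	"regexp"
)

// Compiled once at package init instead of on every call, mirroring the change above.
var botRegex = regexp.MustCompile("(?i)(bot|crawler|sp(i|y)der|search|worm|fetch|nutch)")

func looksLikeBot(name string) bool {
	return botRegex.MatchString(name)
}

func main() {
	fmt.Println(looksLikeBot("Twitterbot")) // true
	fmt.Println(looksLikeBot("NutchCVS"))   // true, matched via "nutch"
	fmt.Println(looksLikeBot("curl"))       // false
}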
diff --git a/vendor/github.com/mssola/user_agent/browser.go b/vendor/github.com/mssola/user_agent/browser.go
index 74fb931ef..fbed92176 100644
--- a/vendor/github.com/mssola/user_agent/browser.go
+++ b/vendor/github.com/mssola/user_agent/browser.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2012-2014 Miquel Sabaté Solà <mikisabate@gmail.com>
+// Copyright (C) 2012-2017 Miquel Sabaté Solà <mikisabate@gmail.com>
// This file is licensed under the MIT license.
// See the LICENSE file.
@@ -9,6 +9,8 @@ import (
"strings"
)
+var ie11Regexp = regexp.MustCompile("^rv:(.+)$")
+
// A struct containing all the information that we might be
// interested from the browser.
type Browser struct {
@@ -34,27 +36,46 @@ func (p *UserAgent) detectBrowser(sections []section) {
slen := len(sections)
if sections[0].name == "Opera" {
- p.mozilla = ""
p.browser.Name = "Opera"
p.browser.Version = sections[0].version
p.browser.Engine = "Presto"
if slen > 1 {
p.browser.EngineVersion = sections[1].version
}
+ } else if sections[0].name == "Dalvik" {
+ // When the Dalvik VM is in use, there is no browser info attached to the UA,
+ // although it is still Mozilla/5.0 compatible.
+ p.mozilla = "5.0"
} else if slen > 1 {
engine := sections[1]
p.browser.Engine = engine.name
p.browser.EngineVersion = engine.version
if slen > 2 {
- p.browser.Version = sections[2].version
+ sectionIndex := 2
+ // The version after the engine comment is empty on e.g. Ubuntu
+ // platforms so if this is the case, let's use the next in line.
+ if sections[2].version == "" && slen > 3 {
+ sectionIndex = 3
+ }
+ p.browser.Version = sections[sectionIndex].version
if engine.name == "AppleWebKit" {
- if sections[slen-1].name == "OPR" {
+ switch sections[slen-1].name {
+ case "Edge":
+ p.browser.Name = "Edge"
+ p.browser.Version = sections[slen-1].version
+ p.browser.Engine = "EdgeHTML"
+ p.browser.EngineVersion = ""
+ case "OPR":
p.browser.Name = "Opera"
p.browser.Version = sections[slen-1].version
- } else if sections[2].name == "Chrome" {
- p.browser.Name = "Chrome"
- } else {
- p.browser.Name = "Safari"
+ default:
+ if sections[sectionIndex].name == "Chrome" {
+ p.browser.Name = "Chrome"
+ } else if sections[sectionIndex].name == "Chromium" {
+ p.browser.Name = "Chromium"
+ } else {
+ p.browser.Name = "Safari"
+ }
}
} else if engine.name == "Gecko" {
name := sections[2].name
@@ -67,9 +88,8 @@ func (p *UserAgent) detectBrowser(sections []section) {
// This is the new user agent from Internet Explorer 11.
p.browser.Engine = "Trident"
p.browser.Name = "Internet Explorer"
- reg, _ := regexp.Compile("^rv:(.+)$")
for _, c := range sections[0].comment {
- version := reg.FindStringSubmatch(c)
+ version := ie11Regexp.FindStringSubmatch(c)
if len(version) > 0 {
p.browser.Version = version[1]
return
diff --git a/vendor/github.com/mssola/user_agent/operating_systems.go b/vendor/github.com/mssola/user_agent/operating_systems.go
index 0b1e93d29..aebd8b394 100644
--- a/vendor/github.com/mssola/user_agent/operating_systems.go
+++ b/vendor/github.com/mssola/user_agent/operating_systems.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2012-2014 Miquel Sabaté Solà <mikisabate@gmail.com>
+// Copyright (C) 2012-2017 Miquel Sabaté Solà <mikisabate@gmail.com>
// This file is licensed under the MIT license.
// See the LICENSE file.
@@ -6,13 +6,26 @@ package user_agent
import "strings"
+// Represents full information on the operating system extracted from the user agent.
+type OSInfo struct {
+ // Full name of the operating system. This is identical to the output of ua.OS()
+ FullName string
+
+ // Name of the operating system. This is sometimes a shorter version of the
+ // operating system name, e.g. "Mac OS X" instead of "Intel Mac OS X"
+ Name string
+
+ // Operating system version, e.g. 7 for Windows 7 or 10.8 for Mac OS X Mountain Lion
+ Version string
+}
+
// Normalize the name of the operating system. By now, this just
-// affects to Windows.
+// affects to Windows NT.
//
// Returns a string containing the normalized name for the Operating System.
func normalizeOS(name string) string {
sp := strings.SplitN(name, " ", 3)
- if len(sp) != 3 {
+ if len(sp) != 3 || sp[1] != "NT" {
return name
}
@@ -33,7 +46,7 @@ func normalizeOS(name string) string {
return "Windows 8"
case "6.3":
return "Windows 8.1"
- case "6.4":
+ case "10.0":
return "Windows 10"
}
return name
@@ -126,7 +139,9 @@ func gecko(p *UserAgent, comment []string) {
}
}
}
- if len(comment) > 3 {
+ // Only parse 4th comment as localization if it doesn't start with rv:.
+ // For example Firefox on Ubuntu contains "rv:XX.X" in this field.
+ if len(comment) > 3 && !strings.HasPrefix(comment[3], "rv:") {
p.localization = comment[3]
}
}
@@ -193,6 +208,23 @@ func opera(p *UserAgent, comment []string) {
}
}
+// Guess the OS. Android browsers send Dalvik as the user agent in the
+// request header.
+//
+// The first argument p is a reference to the current UserAgent and the second
+// argument is a slice of strings containing the comment.
+func dalvik(p *UserAgent, comment []string) {
+ slen := len(comment)
+
+ if strings.HasPrefix(comment[0], "Linux") {
+ p.platform = comment[0]
+ if slen > 2 {
+ p.os = comment[2]
+ }
+ p.mobile = true
+ }
+}
+
// Given the comment of the first section of the UserAgent string,
// get the platform.
func getPlatform(comment []string) string {
@@ -238,6 +270,10 @@ func (p *UserAgent) detectOS(s section) {
if len(s.comment) > 0 {
opera(p, s.comment)
}
+ } else if s.name == "Dalvik" {
+ if len(s.comment) > 0 {
+ dalvik(p, s.comment)
+ }
} else {
// Check whether this is a bot or just a weird browser.
p.undecided = true
@@ -258,3 +294,64 @@ func (p *UserAgent) OS() string {
func (p *UserAgent) Localization() string {
return p.localization
}
+
+// Return OS name and version from a slice of strings created from the full name of the OS.
+func osName(osSplit []string) (name, version string) {
+ if len(osSplit) == 1 {
+ name = osSplit[0]
+ version = ""
+ } else {
+ // Assume version is stored in the last part of the array.
+ nameSplit := osSplit[:len(osSplit)-1]
+ version = osSplit[len(osSplit)-1]
+
+ // Nicer looking Mac OS X
+ if len(nameSplit) >= 2 && nameSplit[0] == "Intel" && nameSplit[1] == "Mac" {
+ nameSplit = nameSplit[1:]
+ }
+ name = strings.Join(nameSplit, " ")
+
+ if strings.Contains(version, "x86") || strings.Contains(version, "i686") {
+ // x86_64 and i686 are not Linux versions but architectures
+ version = ""
+ } else if version == "X" && name == "Mac OS" {
+ // X is not a version for Mac OS.
+ name = name + " " + version
+ version = ""
+ }
+ }
+ return name, version
+}
+
+// Returns combined information for the operating system.
+func (p *UserAgent) OSInfo() OSInfo {
+ // Special case for iPhone weirdness
+ os := strings.Replace(p.os, "like Mac OS X", "", 1)
+ os = strings.Replace(os, "CPU", "", 1)
+ os = strings.Trim(os, " ")
+
+ osSplit := strings.Split(os, " ")
+
+ // Special case for x64 edition of Windows
+ if os == "Windows XP x64 Edition" {
+ osSplit = osSplit[:len(osSplit)-2]
+ }
+
+ name, version := osName(osSplit)
+
+ // Special case for names that contain a forward slash version separator.
+ if strings.Contains(name, "/") {
+ s := strings.Split(name, "/")
+ name = s[0]
+ version = s[1]
+ }
+
+ // Special case for versions that use underscores
+ version = strings.Replace(version, "_", ".", -1)
+
+ return OSInfo{
+ FullName: p.os,
+ Name: name,
+ Version: version,
+ }
+}
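For reference, a minimal sketch of how the new OSInfo accessor and the extended browser detection might be used; the user agent string below is illustrative, not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/mssola/user_agent"
)

func main() {
	// Illustrative UA string; any Mozilla/5.0-style value works here.
	ua := user_agent.New("Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
		"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0 Safari/537.36 Edge/16.16299")

	name, version := ua.Browser()
	fmt.Println(name, version) // e.g. "Edge 16.16299" given the Edge branch added above

	info := ua.OSInfo()
	fmt.Println(info.FullName, "/", info.Name, "/", info.Version)
}
```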
diff --git a/vendor/github.com/mssola/user_agent/user_agent.go b/vendor/github.com/mssola/user_agent/user_agent.go
index 74ddf273c..36e8d1bfa 100644
--- a/vendor/github.com/mssola/user_agent/user_agent.go
+++ b/vendor/github.com/mssola/user_agent/user_agent.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2012-2014 Miquel Sabaté Solà <mikisabate@gmail.com>
+// Copyright (C) 2012-2017 Miquel Sabaté Solà <mikisabate@gmail.com>
// This file is licensed under the MIT license.
// See the LICENSE file.
@@ -8,9 +8,7 @@
// information that has been extracted from a parsed User Agent string.
package user_agent
-import (
- "strings"
-)
+import "strings"
// A section contains the name of the product, its version and
// an optional comment.
@@ -141,7 +139,9 @@ func (p *UserAgent) Parse(ua string) {
}
if len(sections) > 0 {
- p.mozilla = sections[0].version
+ if sections[0].name == "Mozilla" {
+ p.mozilla = sections[0].version
+ }
p.detectBrowser(sections)
p.detectOS(sections[0])
@@ -167,3 +167,8 @@ func (p *UserAgent) Bot() bool {
func (p *UserAgent) Mobile() bool {
return p.mobile
}
+
+// Returns the original given user agent.
+func (p *UserAgent) UA() string {
+ return p.ua
+}
diff --git a/vendor/github.com/nicksnyder/go-i18n/.travis.yml b/vendor/github.com/nicksnyder/go-i18n/.travis.yml
index 527eb6475..8937ab6bf 100644
--- a/vendor/github.com/nicksnyder/go-i18n/.travis.yml
+++ b/vendor/github.com/nicksnyder/go-i18n/.travis.yml
@@ -8,4 +8,4 @@ go:
- 1.6
- 1.7
- 1.8
- - tip
+ - 1.9
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go
index 5d6b6ad4f..58971033f 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go
@@ -81,9 +81,9 @@ var codeTemplate = template.Must(template.New("spec").Parse(`package language
func init() {
{{range .PluralGroups}}
- registerPluralSpec({{printf "%#v" .SplitLocales}}, &PluralSpec{
+ RegisterPluralSpec({{printf "%#v" .SplitLocales}}, &PluralSpec{
Plurals: newPluralSet({{range $i, $e := .PluralRules}}{{if $i}}, {{end}}{{$e.CountTitle}}{{end}}),
- PluralFunc: func(ops *operands) Plural { {{range .PluralRules}}{{if .GoCondition}}
+ PluralFunc: func(ops *Operands) Plural { {{range .PluralRules}}{{if .GoCondition}}
// {{.Condition}}
if {{.GoCondition}} {
return {{.CountTitle}}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/plurals.xml b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/plurals.xml
index cdd0b5296..3310c8ee2 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/plurals.xml
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/plurals.xml
@@ -6,13 +6,13 @@ CLDR data files are interpreted according to the LDML specification (http://unic
For terms of use, see http://www.unicode.org/copyright.html
-->
<supplementalData>
- <version number="$Revision: 12002 $"/>
+ <version number="$Revision: 13253 $"/>
<plurals type="cardinal">
<!-- For a canonicalized list, use GeneratedPluralSamples -->
<!-- 1: other -->
- <pluralRules locales="bm bo dz id ig ii in ja jbo jv jw kde kea km ko lkt lo ms my nqo root sah ses sg th to vi wo yo zh">
+ <pluralRules locales="bm bo dz id ig ii in ja jbo jv jw kde kea km ko lkt lo ms my nqo root sah ses sg th to vi wo yo yue zh">
<pluralRule count="other"> @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
</pluralRules>
@@ -26,6 +26,10 @@ For terms of use, see http://www.unicode.org/copyright.html
<pluralRule count="one">i = 0,1 @integer 0, 1 @decimal 0.0~1.5</pluralRule>
<pluralRule count="other"> @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
</pluralRules>
+ <pluralRules locales="pt">
+ <pluralRule count="one">i = 0..1 @integer 0, 1 @decimal 0.0~1.5</pluralRule>
+ <pluralRule count="other"> @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
+ </pluralRules>
<pluralRules locales="ast ca de en et fi fy gl it ji nl sv sw ur yi">
<pluralRule count="one">i = 1 and v = 0 @integer 1</pluralRule>
<pluralRule count="other"> @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
@@ -42,18 +46,10 @@ For terms of use, see http://www.unicode.org/copyright.html
<pluralRule count="one">n = 0..1 or n = 11..99 @integer 0, 1, 11~24 @decimal 0.0, 1.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0</pluralRule>
<pluralRule count="other"> @integer 2~10, 100~106, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
</pluralRules>
- <pluralRules locales="pt">
- <pluralRule count="one">n = 0..2 and n != 2 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000</pluralRule>
- <pluralRule count="other"> @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
- </pluralRules>
<pluralRules locales="af asa az bem bez bg brx ce cgg chr ckb dv ee el eo es eu fo fur gsw ha haw hu jgo jmc ka kaj kcg kk kkj kl ks ksb ku ky lb lg mas mgo ml mn nah nb nd ne nn nnh no nr ny nyn om or os pap ps rm rof rwk saq sdh seh sn so sq ss ssy st syr ta te teo tig tk tn tr ts ug uz ve vo vun wae xh xog">
<pluralRule count="one">n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000</pluralRule>
<pluralRule count="other"> @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
</pluralRules>
- <pluralRules locales="pt_PT">
- <pluralRule count="one">n = 1 and v = 0 @integer 1</pluralRule>
- <pluralRule count="other"> @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
- </pluralRules>
<pluralRules locales="da">
<pluralRule count="one">n = 1 or t != 0 and i = 0,1 @integer 1 @decimal 0.1~1.6</pluralRule>
<pluralRule count="other"> @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0~3.4, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …</pluralRule>
@@ -210,7 +206,7 @@ For terms of use, see http://www.unicode.org/copyright.html
<!-- 6: zero,one,two,few,many,other -->
- <pluralRules locales="ar">
+ <pluralRules locales="ar ars">
<pluralRule count="zero">n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000</pluralRule>
<pluralRule count="one">n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000</pluralRule>
<pluralRule count="two">n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000</pluralRule>
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/language_test.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/language_test.go
index 2949bfe4a..1ab3314d6 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/language_test.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/language_test.go
@@ -18,8 +18,8 @@ func TestParse(t *testing.T) {
{"zh-TW", []*Language{{"zh-tw", pluralSpecs["zh"]}}},
{"pt-BR", []*Language{{"pt-br", pluralSpecs["pt"]}}},
{"pt_BR", []*Language{{"pt-br", pluralSpecs["pt"]}}},
- {"pt-PT", []*Language{{"pt-pt", pluralSpecs["pt-pt"]}}},
- {"pt_PT", []*Language{{"pt-pt", pluralSpecs["pt-pt"]}}},
+ {"pt-PT", []*Language{{"pt-pt", pluralSpecs["pt"]}}},
+ {"pt_PT", []*Language{{"pt-pt", pluralSpecs["pt"]}}},
{"zh-Hans-CN", []*Language{{"zh-hans-cn", pluralSpecs["zh"]}}},
{"zh-Hant-TW", []*Language{{"zh-hant-tw", pluralSpecs["zh"]}}},
{"en-US-en-US", []*Language{{"en-us-en-us", pluralSpecs["en"]}}},
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
index 877bcc89d..49ee7dc7c 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
@@ -7,7 +7,7 @@ import (
)
// http://unicode.org/reports/tr35/tr35-numbers.html#Operands
-type operands struct {
+type Operands struct {
N float64 // absolute value of the source number (integer and decimals)
I int64 // integer digits of n
V int64 // number of visible fraction digits in n, with trailing zeros
@@ -17,7 +17,7 @@ type operands struct {
}
// NmodEqualAny returns true if o represents an integer equal to any of the arguments.
-func (o *operands) NequalsAny(any ...int64) bool {
+func (o *Operands) NequalsAny(any ...int64) bool {
for _, i := range any {
if o.I == i && o.T == 0 {
return true
@@ -27,7 +27,7 @@ func (o *operands) NequalsAny(any ...int64) bool {
}
// NmodEqualAny returns true if o represents an integer equal to any of the arguments modulo mod.
-func (o *operands) NmodEqualsAny(mod int64, any ...int64) bool {
+func (o *Operands) NmodEqualsAny(mod int64, any ...int64) bool {
modI := o.I % mod
for _, i := range any {
if modI == i && o.T == 0 {
@@ -38,17 +38,17 @@ func (o *operands) NmodEqualsAny(mod int64, any ...int64) bool {
}
// NmodInRange returns true if o represents an integer in the closed interval [from, to].
-func (o *operands) NinRange(from, to int64) bool {
+func (o *Operands) NinRange(from, to int64) bool {
return o.T == 0 && from <= o.I && o.I <= to
}
// NmodInRange returns true if o represents an integer in the closed interval [from, to] modulo mod.
-func (o *operands) NmodInRange(mod, from, to int64) bool {
+func (o *Operands) NmodInRange(mod, from, to int64) bool {
modI := o.I % mod
return o.T == 0 && from <= modI && modI <= to
}
-func newOperands(v interface{}) (*operands, error) {
+func newOperands(v interface{}) (*Operands, error) {
switch v := v.(type) {
case int:
return newOperandsInt64(int64(v)), nil
@@ -69,14 +69,14 @@ func newOperands(v interface{}) (*operands, error) {
}
}
-func newOperandsInt64(i int64) *operands {
+func newOperandsInt64(i int64) *Operands {
if i < 0 {
i = -i
}
- return &operands{float64(i), i, 0, 0, 0, 0}
+ return &Operands{float64(i), i, 0, 0, 0, 0}
}
-func newOperandsString(s string) (*operands, error) {
+func newOperandsString(s string) (*Operands, error) {
if s[0] == '-' {
s = s[1:]
}
@@ -84,7 +84,7 @@ func newOperandsString(s string) (*operands, error) {
if err != nil {
return nil, err
}
- ops := &operands{N: n}
+ ops := &Operands{N: n}
parts := strings.SplitN(s, ".", 2)
ops.I, err = strconv.ParseInt(parts[0], 10, 64)
if err != nil {
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands_test.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands_test.go
index 29030876a..e4f33902b 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands_test.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands_test.go
@@ -8,20 +8,20 @@ import (
func TestNewOperands(t *testing.T) {
tests := []struct {
input interface{}
- ops *operands
+ ops *Operands
err bool
}{
- {int64(0), &operands{0.0, 0, 0, 0, 0, 0}, false},
- {int64(1), &operands{1.0, 1, 0, 0, 0, 0}, false},
- {"0", &operands{0.0, 0, 0, 0, 0, 0}, false},
- {"1", &operands{1.0, 1, 0, 0, 0, 0}, false},
- {"1.0", &operands{1.0, 1, 1, 0, 0, 0}, false},
- {"1.00", &operands{1.0, 1, 2, 0, 0, 0}, false},
- {"1.3", &operands{1.3, 1, 1, 1, 3, 3}, false},
- {"1.30", &operands{1.3, 1, 2, 1, 30, 3}, false},
- {"1.03", &operands{1.03, 1, 2, 2, 3, 3}, false},
- {"1.230", &operands{1.23, 1, 3, 2, 230, 23}, false},
- {"20.0230", &operands{20.023, 20, 4, 3, 230, 23}, false},
+ {int64(0), &Operands{0.0, 0, 0, 0, 0, 0}, false},
+ {int64(1), &Operands{1.0, 1, 0, 0, 0, 0}, false},
+ {"0", &Operands{0.0, 0, 0, 0, 0, 0}, false},
+ {"1", &Operands{1.0, 1, 0, 0, 0, 0}, false},
+ {"1.0", &Operands{1.0, 1, 1, 0, 0, 0}, false},
+ {"1.00", &Operands{1.0, 1, 2, 0, 0, 0}, false},
+ {"1.3", &Operands{1.3, 1, 1, 1, 3, 3}, false},
+ {"1.30", &Operands{1.3, 1, 2, 1, 30, 3}, false},
+ {"1.03", &Operands{1.03, 1, 2, 2, 3, 3}, false},
+ {"1.230", &Operands{1.23, 1, 3, 2, 230, 23}, false},
+ {"20.0230", &Operands{20.023, 20, 4, 3, 230, 23}, false},
{20.0230, nil, true},
}
for _, test := range tests {
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go
index fc3522682..85b2a1b37 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go
@@ -7,7 +7,7 @@ import "strings"
// http://unicode.org/reports/tr35/tr35-numbers.html#Operands
type PluralSpec struct {
Plurals map[Plural]struct{}
- PluralFunc func(*operands) Plural
+ PluralFunc func(*Operands) Plural
}
var pluralSpecs = make(map[string]*PluralSpec)
@@ -18,7 +18,8 @@ func normalizePluralSpecID(id string) string {
return id
}
-func registerPluralSpec(ids []string, ps *PluralSpec) {
+// RegisterPluralSpec registers a new plural spec for the language ids.
+func RegisterPluralSpec(ids []string, ps *PluralSpec) {
for _, id := range ids {
id = normalizePluralSpecID(id)
pluralSpecs[id] = ps
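With registerPluralSpec now exported as RegisterPluralSpec, code outside the package can plug in its own rules. A sketch under the assumption of a made-up locale ID "xx":

```go
package main

import "github.com/nicksnyder/go-i18n/i18n/language"

func main() {
	// Hypothetical locale ID "xx": "one" for n = 1, "other" otherwise.
	language.RegisterPluralSpec([]string{"xx"}, &language.PluralSpec{
		Plurals: map[language.Plural]struct{}{
			language.One:   {},
			language.Other: {},
		},
		PluralFunc: func(ops *language.Operands) language.Plural {
			if ops.NequalsAny(1) {
				return language.One
			}
			return language.Other
		},
	})
}
```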
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go
index c9b4f2667..0268bb92c 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go
@@ -4,15 +4,15 @@ package language
func init() {
- registerPluralSpec([]string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "zh"}, &PluralSpec{
+ RegisterPluralSpec([]string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "yue", "zh"}, &PluralSpec{
Plurals: newPluralSet(Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
return Other
},
})
- registerPluralSpec([]string{"am", "as", "bn", "fa", "gu", "hi", "kn", "mr", "zu"}, &PluralSpec{
+ RegisterPluralSpec([]string{"am", "as", "bn", "fa", "gu", "hi", "kn", "mr", "zu"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 0 or n = 1
if intEqualsAny(ops.I, 0) ||
ops.NequalsAny(1) {
@@ -21,9 +21,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ff", "fr", "hy", "kab"}, &PluralSpec{
+ RegisterPluralSpec([]string{"ff", "fr", "hy", "kab"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 0,1
if intEqualsAny(ops.I, 0, 1) {
return One
@@ -31,9 +31,19 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ast", "ca", "de", "en", "et", "fi", "fy", "gl", "it", "ji", "nl", "sv", "sw", "ur", "yi"}, &PluralSpec{
+ RegisterPluralSpec([]string{"pt"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
+ // i = 0..1
+ if intInRange(ops.I, 0, 1) {
+ return One
+ }
+ return Other
+ },
+ })
+ RegisterPluralSpec([]string{"ast", "ca", "de", "en", "et", "fi", "fy", "gl", "it", "ji", "nl", "sv", "sw", "ur", "yi"}, &PluralSpec{
+ Plurals: newPluralSet(One, Other),
+ PluralFunc: func(ops *Operands) Plural {
// i = 1 and v = 0
if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) {
return One
@@ -41,9 +51,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"si"}, &PluralSpec{
+ RegisterPluralSpec([]string{"si"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0,1 or i = 0 and f = 1
if ops.NequalsAny(0, 1) ||
intEqualsAny(ops.I, 0) && intEqualsAny(ops.F, 1) {
@@ -52,9 +62,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ak", "bh", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &PluralSpec{
+ RegisterPluralSpec([]string{"ak", "bh", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0..1
if ops.NinRange(0, 1) {
return One
@@ -62,9 +72,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"tzm"}, &PluralSpec{
+ RegisterPluralSpec([]string{"tzm"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0..1 or n = 11..99
if ops.NinRange(0, 1) ||
ops.NinRange(11, 99) {
@@ -73,19 +83,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"pt"}, &PluralSpec{
- Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
- // n = 0..2 and n != 2
- if ops.NinRange(0, 2) && !ops.NequalsAny(2) {
- return One
- }
- return Other
- },
- })
- registerPluralSpec([]string{"af", "asa", "az", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "es", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &PluralSpec{
+ RegisterPluralSpec([]string{"af", "asa", "az", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "es", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 1
if ops.NequalsAny(1) {
return One
@@ -93,19 +93,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"pt_PT"}, &PluralSpec{
- Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
- // n = 1 and v = 0
- if ops.NequalsAny(1) && intEqualsAny(ops.V, 0) {
- return One
- }
- return Other
- },
- })
- registerPluralSpec([]string{"da"}, &PluralSpec{
+ RegisterPluralSpec([]string{"da"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 1 or t != 0 and i = 0,1
if ops.NequalsAny(1) ||
!intEqualsAny(ops.T, 0) && intEqualsAny(ops.I, 0, 1) {
@@ -114,9 +104,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"is"}, &PluralSpec{
+ RegisterPluralSpec([]string{"is"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0
if intEqualsAny(ops.T, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) ||
!intEqualsAny(ops.T, 0) {
@@ -125,9 +115,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"mk"}, &PluralSpec{
+ RegisterPluralSpec([]string{"mk"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i % 10 = 1 or f % 10 = 1
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) ||
intEqualsAny(ops.F%10, 1) {
@@ -136,9 +126,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"fil", "tl"}, &PluralSpec{
+ RegisterPluralSpec([]string{"fil", "tl"}, &PluralSpec{
Plurals: newPluralSet(One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I, 1, 2, 3) ||
intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I%10, 4, 6, 9) ||
@@ -148,9 +138,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"lv", "prg"}, &PluralSpec{
+ RegisterPluralSpec([]string{"lv", "prg"}, &PluralSpec{
Plurals: newPluralSet(Zero, One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19
if ops.NmodEqualsAny(10, 0) ||
ops.NmodInRange(100, 11, 19) ||
@@ -166,9 +156,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"lag"}, &PluralSpec{
+ RegisterPluralSpec([]string{"lag"}, &PluralSpec{
Plurals: newPluralSet(Zero, One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0
if ops.NequalsAny(0) {
return Zero
@@ -180,9 +170,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ksh"}, &PluralSpec{
+ RegisterPluralSpec([]string{"ksh"}, &PluralSpec{
Plurals: newPluralSet(Zero, One, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0
if ops.NequalsAny(0) {
return Zero
@@ -194,9 +184,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"iu", "kw", "naq", "se", "sma", "smi", "smj", "smn", "sms"}, &PluralSpec{
+ RegisterPluralSpec([]string{"iu", "kw", "naq", "se", "sma", "smi", "smj", "smn", "sms"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 1
if ops.NequalsAny(1) {
return One
@@ -208,9 +198,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"shi"}, &PluralSpec{
+ RegisterPluralSpec([]string{"shi"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 0 or n = 1
if intEqualsAny(ops.I, 0) ||
ops.NequalsAny(1) {
@@ -223,9 +213,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"mo", "ro"}, &PluralSpec{
+ RegisterPluralSpec([]string{"mo", "ro"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 1 and v = 0
if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) {
return One
@@ -239,9 +229,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"bs", "hr", "sh", "sr"}, &PluralSpec{
+ RegisterPluralSpec([]string{"bs", "hr", "sh", "sr"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) ||
intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) {
@@ -255,9 +245,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"gd"}, &PluralSpec{
+ RegisterPluralSpec([]string{"gd"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Few, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 1,11
if ops.NequalsAny(1, 11) {
return One
@@ -273,9 +263,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"sl"}, &PluralSpec{
+ RegisterPluralSpec([]string{"sl"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Few, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i % 100 = 1
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) {
return One
@@ -292,9 +282,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"dsb", "hsb"}, &PluralSpec{
+ RegisterPluralSpec([]string{"dsb", "hsb"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Few, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i % 100 = 1 or f % 100 = 1
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) ||
intEqualsAny(ops.F%100, 1) {
@@ -313,9 +303,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"he", "iw"}, &PluralSpec{
+ RegisterPluralSpec([]string{"he", "iw"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 1 and v = 0
if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) {
return One
@@ -331,9 +321,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"cs", "sk"}, &PluralSpec{
+ RegisterPluralSpec([]string{"cs", "sk"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 1 and v = 0
if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) {
return One
@@ -349,9 +339,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"pl"}, &PluralSpec{
+ RegisterPluralSpec([]string{"pl"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// i = 1 and v = 0
if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) {
return One
@@ -369,9 +359,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"be"}, &PluralSpec{
+ RegisterPluralSpec([]string{"be"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n % 10 = 1 and n % 100 != 11
if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11) {
return One
@@ -389,9 +379,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"lt"}, &PluralSpec{
+ RegisterPluralSpec([]string{"lt"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n % 10 = 1 and n % 100 != 11..19
if ops.NmodEqualsAny(10, 1) && !ops.NmodInRange(100, 11, 19) {
return One
@@ -407,9 +397,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"mt"}, &PluralSpec{
+ RegisterPluralSpec([]string{"mt"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 1
if ops.NequalsAny(1) {
return One
@@ -426,9 +416,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ru", "uk"}, &PluralSpec{
+ RegisterPluralSpec([]string{"ru", "uk"}, &PluralSpec{
Plurals: newPluralSet(One, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i % 10 = 1 and i % 100 != 11
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) {
return One
@@ -446,9 +436,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"br"}, &PluralSpec{
+ RegisterPluralSpec([]string{"br"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n % 10 = 1 and n % 100 != 11,71,91
if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11, 71, 91) {
return One
@@ -468,9 +458,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ga"}, &PluralSpec{
+ RegisterPluralSpec([]string{"ga"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 1
if ops.NequalsAny(1) {
return One
@@ -490,9 +480,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"gv"}, &PluralSpec{
+ RegisterPluralSpec([]string{"gv"}, &PluralSpec{
Plurals: newPluralSet(One, Two, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// v = 0 and i % 10 = 1
if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) {
return One
@@ -512,9 +502,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"ar"}, &PluralSpec{
+ RegisterPluralSpec([]string{"ar", "ars"}, &PluralSpec{
Plurals: newPluralSet(Zero, One, Two, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0
if ops.NequalsAny(0) {
return Zero
@@ -538,9 +528,9 @@ func init() {
return Other
},
})
- registerPluralSpec([]string{"cy"}, &PluralSpec{
+ RegisterPluralSpec([]string{"cy"}, &PluralSpec{
Plurals: newPluralSet(Zero, One, Two, Few, Many, Other),
- PluralFunc: func(ops *operands) Plural {
+ PluralFunc: func(ops *Operands) Plural {
// n = 0
if ops.NequalsAny(0) {
return Zero
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen_test.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen_test.go
index c8ec41fd4..4cfa97bdf 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen_test.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen_test.go
@@ -4,13 +4,13 @@ package language
import "testing"
-func TestBmBoDzIdIgIiInJaJboJvJwKdeKeaKmKoLktLoMsMyNqoRootSahSesSgThToViWoYoZh(t *testing.T) {
+func TestBmBoDzIdIgIiInJaJboJvJwKdeKeaKmKoLktLoMsMyNqoRootSahSesSgThToViWoYoYueZh(t *testing.T) {
var tests []pluralTest
tests = appendIntegerTests(tests, Other, []string{"0~15", "100", "1000", "10000", "100000", "1000000"})
tests = appendDecimalTests(tests, Other, []string{"0.0~1.5", "10.0", "100.0", "1000.0", "10000.0", "100000.0", "1000000.0"})
- locales := []string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "zh"}
+ locales := []string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "yue", "zh"}
for _, locale := range locales {
runTests(t, locale, tests)
}
@@ -46,6 +46,21 @@ func TestFfFrHyKab(t *testing.T) {
}
}
+func TestPt(t *testing.T) {
+ var tests []pluralTest
+
+ tests = appendIntegerTests(tests, One, []string{"0", "1"})
+ tests = appendDecimalTests(tests, One, []string{"0.0~1.5"})
+
+ tests = appendIntegerTests(tests, Other, []string{"2~17", "100", "1000", "10000", "100000", "1000000"})
+ tests = appendDecimalTests(tests, Other, []string{"2.0~3.5", "10.0", "100.0", "1000.0", "10000.0", "100000.0", "1000000.0"})
+
+ locales := []string{"pt"}
+ for _, locale := range locales {
+ runTests(t, locale, tests)
+ }
+}
+
func TestAstCaDeEnEtFiFyGlItJiNlSvSwUrYi(t *testing.T) {
var tests []pluralTest
@@ -105,21 +120,6 @@ func TestTzm(t *testing.T) {
}
}
-func TestPt(t *testing.T) {
- var tests []pluralTest
-
- tests = appendIntegerTests(tests, One, []string{"0", "1"})
- tests = appendDecimalTests(tests, One, []string{"0.0", "1.0", "0.00", "1.00", "0.000", "1.000", "0.0000", "1.0000"})
-
- tests = appendIntegerTests(tests, Other, []string{"2~17", "100", "1000", "10000", "100000", "1000000"})
- tests = appendDecimalTests(tests, Other, []string{"0.1~0.9", "1.1~1.7", "10.0", "100.0", "1000.0", "10000.0", "100000.0", "1000000.0"})
-
- locales := []string{"pt"}
- for _, locale := range locales {
- runTests(t, locale, tests)
- }
-}
-
func TestAfAsaAzBemBezBgBrxCeCggChrCkbDvEeElEoEsEuFoFurGswHaHawHuJgoJmcKaKajKcgKkKkjKlKsKsbKuKyLbLgMasMgoMlMnNahNbNdNeNnNnhNoNrNyNynOmOrOsPapPsRmRofRwkSaqSdhSehSnSoSqSsSsyStSyrTaTeTeoTigTkTnTrTsUgUzVeVoVunWaeXhXog(t *testing.T) {
var tests []pluralTest
@@ -135,20 +135,6 @@ func TestAfAsaAzBemBezBgBrxCeCggChrCkbDvEeElEoEsEuFoFurGswHaHawHuJgoJmcKaKajKcgK
}
}
-func TestPt_PT(t *testing.T) {
- var tests []pluralTest
-
- tests = appendIntegerTests(tests, One, []string{"1"})
-
- tests = appendIntegerTests(tests, Other, []string{"0", "2~16", "100", "1000", "10000", "100000", "1000000"})
- tests = appendDecimalTests(tests, Other, []string{"0.0~1.5", "10.0", "100.0", "1000.0", "10000.0", "100000.0", "1000000.0"})
-
- locales := []string{"pt_PT"}
- for _, locale := range locales {
- runTests(t, locale, tests)
- }
-}
-
func TestDa(t *testing.T) {
var tests []pluralTest
@@ -590,7 +576,7 @@ func TestGv(t *testing.T) {
}
}
-func TestAr(t *testing.T) {
+func TestArArs(t *testing.T) {
var tests []pluralTest
tests = appendIntegerTests(tests, Zero, []string{"0"})
@@ -611,7 +597,7 @@ func TestAr(t *testing.T) {
tests = appendIntegerTests(tests, Other, []string{"100~102", "200~202", "300~302", "400~402", "500~502", "600", "1000", "10000", "100000", "1000000"})
tests = appendDecimalTests(tests, Other, []string{"0.1~0.9", "1.1~1.7", "10.1", "100.0", "1000.0", "10000.0", "100000.0", "1000000.0"})
- locales := []string{"ar"}
+ locales := []string{"ar", "ars"}
for _, locale := range locales {
runTests(t, locale, tests)
}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_test.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_test.go
index 34931b7bb..919b489bd 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_test.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_test.go
@@ -23,8 +23,8 @@ func TestGetPluralSpec(t *testing.T) {
{"zh-TW", pluralSpecs["zh"]},
{"pt-BR", pluralSpecs["pt"]},
{"pt_BR", pluralSpecs["pt"]},
- {"pt-PT", pluralSpecs["pt-pt"]},
- {"pt_PT", pluralSpecs["pt-pt"]},
+ {"pt-PT", pluralSpecs["pt"]},
+ {"pt_PT", pluralSpecs["pt"]},
{"zh-Hans-CN", pluralSpecs["zh"]},
{"zh-Hant-TW", pluralSpecs["zh"]},
{"zh-CN", pluralSpecs["zh"]},
@@ -462,11 +462,11 @@ func TestPortuguese(t *testing.T) {
{"0.0", One},
{1, One},
{"1.0", One},
- {onePlusEpsilon, Other},
+ {onePlusEpsilon, One},
{2, Other},
}
- tests = appendFloatTests(tests, 0.1, 0.9, Other)
- tests = appendFloatTests(tests, 1.1, 10.0, Other)
+ tests = appendFloatTests(tests, 0, 1.5, One)
+ tests = appendFloatTests(tests, 2, 10.0, Other)
runTests(t, "pt", tests)
}
@@ -484,23 +484,6 @@ func TestMacedonian(t *testing.T) {
runTests(t, "mk", tests)
}
-func TestPortugueseEuropean(t *testing.T) {
- tests := []pluralTest{
- {0, Other},
- {"0.0", Other},
- {"0.1", Other},
- {"0.01", Other},
- {1, One},
- {"1", One},
- {"1.1", Other},
- {"1.01", Other},
- {onePlusEpsilon, Other},
- {2, Other},
- }
- tests = appendFloatTests(tests, 2.0, 10.0, Other)
- runTests(t, "pt-pt", tests)
-}
-
func TestRussian(t *testing.T) {
tests := []pluralTest{
{0, Many},
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go
index 5dd74b2f5..17c32609c 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go
@@ -50,7 +50,7 @@ func (pt *pluralTranslation) Normalize(l *language.Language) Translation {
func (pt *pluralTranslation) Backfill(src Translation) Translation {
for pc, t := range pt.templates {
- if t == nil || t.src == "" {
+ if (t == nil || t.src == "") && src != nil {
pt.templates[pc] = src.Template(language.Other)
}
}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go
index 9fcba5a18..a76c8c941 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go
@@ -37,7 +37,7 @@ func (st *singleTranslation) Normalize(language *language.Language) Translation
}
func (st *singleTranslation) Backfill(src Translation) Translation {
- if st.template == nil || st.template.src == "" {
+ if (st.template == nil || st.template.src == "") && src != nil {
st.template = src.Template(language.Other)
}
return st
diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml
index 1f8b41ffe..496691166 100644
--- a/vendor/github.com/pelletier/go-toml/.travis.yml
+++ b/vendor/github.com/pelletier/go-toml/.travis.yml
@@ -1,9 +1,9 @@
sudo: false
language: go
go:
- - 1.6.4
- 1.7.6
- 1.8.3
+ - 1.9
- tip
matrix:
allow_failures:
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
index 22da41a81..2681690d5 100644
--- a/vendor/github.com/pelletier/go-toml/README.md
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -33,7 +33,7 @@ import "github.com/pelletier/go-toml"
Read a TOML document:
```go
-config, _ := toml.LoadString(`
+config, _ := toml.Load(`
[postgres]
user = "pelletier"
password = "mypassword"`)
@@ -42,7 +42,7 @@ user := config.Get("postgres.user").(string)
// or using an intermediate object
postgresConfig := config.Get("postgres").(*toml.Tree)
-password = postgresConfig.Get("password").(string)
+password := postgresConfig.Get("password").(string)
```
Or use Unmarshal:
@@ -62,7 +62,7 @@ user = "pelletier"
password = "mypassword"`)
config := Config{}
-Unmarshal(doc, &config)
+toml.Unmarshal(doc, &config)
fmt.Println("user=", config.Postgres.User)
```
@@ -70,7 +70,8 @@ Or use a query:
```go
// use a query to gather elements without walking the tree
-results, _ := config.Query("$..[user,password]")
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
for ii, item := range results.Values() {
fmt.Println("Query result %d: %v", ii, item)
}
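Combining the corrected README snippets into one self-contained sketch (paths and values as in the README above; error handling added for illustration):

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
	"github.com/pelletier/go-toml/query"
)

func main() {
	config, err := toml.Load(`
[postgres]
user = "pelletier"
password = "mypassword"`)
	if err != nil {
		panic(err)
	}

	// Compile the query once, then run it against the parsed tree.
	q, err := query.Compile("$..[user,password]")
	if err != nil {
		panic(err)
	}
	results := q.Execute(config)
	for ii, item := range results.Values() {
		fmt.Printf("Query result %d: %v\n", ii, item)
	}
}
```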
diff --git a/vendor/github.com/pelletier/go-toml/query/parser.go b/vendor/github.com/pelletier/go-toml/query/parser.go
index e4f91b97e..5f69b70d4 100644
--- a/vendor/github.com/pelletier/go-toml/query/parser.go
+++ b/vendor/github.com/pelletier/go-toml/query/parser.go
@@ -253,7 +253,7 @@ func (p *queryParser) parseFilterExpr() queryParserStateFn {
}
tok = p.getToken()
if tok.typ != tokenKey && tok.typ != tokenString {
- return p.parseError(tok, "expected key or string for filter funciton name")
+ return p.parseError(tok, "expected key or string for filter function name")
}
name := tok.val
tok = p.getToken()
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
index bf3eed6f5..7ca408d1b 100644
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -6,6 +6,7 @@ go:
- 1.6.x
- 1.7.x
- 1.8.x
+ - 1.9.x
- tip
script:
diff --git a/vendor/github.com/prometheus/common/promlog/flag/flag.go b/vendor/github.com/prometheus/common/promlog/flag/flag.go
new file mode 100644
index 000000000..b9d361e43
--- /dev/null
+++ b/vendor/github.com/prometheus/common/promlog/flag/flag.go
@@ -0,0 +1,33 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flag
+
+import (
+ "github.com/prometheus/common/promlog"
+ kingpin "gopkg.in/alecthomas/kingpin.v2"
+)
+
+// LevelFlagName is the canonical flag name to configure the allowed log level
+// within Prometheus projects.
+const LevelFlagName = "log.level"
+
+// LevelFlagHelp is the help description for the log.level flag.
+const LevelFlagHelp = "Only log messages with the given severity or above. One of: [debug, info, warn, error]"
+
+// AddFlags adds the flags used by this package to the Kingpin application.
+// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
+func AddFlags(a *kingpin.Application, logLevel *promlog.AllowedLevel) {
+ a.Flag(LevelFlagName, LevelFlagHelp).
+ Default("info").SetValue(logLevel)
+}
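A sketch of wiring the new flag helper into a hypothetical main package (the application name "example" is made up):

```go
package main

import (
	"os"

	"github.com/prometheus/common/promlog"
	promlogflag "github.com/prometheus/common/promlog/flag"
	kingpin "gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	// AddFlags registers --log.level on the application with default "info".
	app := kingpin.New("example", "demo for promlog/flag")
	var logLevel promlog.AllowedLevel
	promlogflag.AddFlags(app, &logLevel)
	kingpin.MustParse(app.Parse(os.Args[1:]))
	_ = logLevel // handed to promlog.New, see the sketch after the next file
}
```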
diff --git a/vendor/github.com/prometheus/common/promlog/log.go b/vendor/github.com/prometheus/common/promlog/log.go
new file mode 100644
index 000000000..cf8307ad2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/promlog/log.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promlog defines standardised ways to initialize Go kit loggers
+// across Prometheus components.
+// It should typically only ever be imported by main packages.
+package promlog
+
+import (
+ "os"
+
+ "github.com/go-kit/kit/log"
+ "github.com/go-kit/kit/log/level"
+ "github.com/pkg/errors"
+)
+
+// AllowedLevel is a settable identifier for the minimum level a log entry
+// must have.
+type AllowedLevel struct {
+ s string
+ o level.Option
+}
+
+func (l *AllowedLevel) String() string {
+ return l.s
+}
+
+// Set updates the value of the allowed level.
+func (l *AllowedLevel) Set(s string) error {
+ switch s {
+ case "debug":
+ l.o = level.AllowDebug()
+ case "info":
+ l.o = level.AllowInfo()
+ case "warn":
+ l.o = level.AllowWarn()
+ case "error":
+ l.o = level.AllowError()
+ default:
+ return errors.Errorf("unrecognized log level %q", s)
+ }
+ l.s = s
+ return nil
+}
+
+// New returns a new leveled oklog logger in the logfmt format. Each logged line will be annotated
+// with a timestamp. The output always goes to stderr.
+func New(al AllowedLevel) log.Logger {
+ l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+ l = level.NewFilter(l, al.o)
+ l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+ return l
+}
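And a minimal sketch of building the logger itself from an AllowedLevel (the chosen level is illustrative):

```go
package main

import (
	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/common/promlog"
)

func main() {
	var lvl promlog.AllowedLevel
	if err := lvl.Set("debug"); err != nil { // one of debug/info/warn/error
		panic(err)
	}
	logger := promlog.New(lvl)
	level.Info(logger).Log("msg", "logger initialized", "component", "example")
}
```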
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 000000000..08b3b7e01
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+ // afero does not support Lstat directly.
+ if _, err = lstatIfOs(fs, pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := filepath.Split(pattern)
+ switch dir {
+ case "":
+ dir = "."
+ case string(filepath.Separator):
+ // nothing
+ default:
+ dir = dir[0 : len(dir)-1] // chop off trailing separator
+ }
+
+ if !hasMeta(dir) {
+ return glob(fs, dir, file, nil)
+ }
+
+ var m []string
+ m, err = Glob(fs, dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(fs, d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := fs.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ d, err := fs.Open(dir)
+ if err != nil {
+ return
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ // TODO(niemeyer): Should other magic characters be added here?
+ return strings.IndexAny(path, "*?[") >= 0
+}
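A small usage sketch of the new Glob helper against an in-memory filesystem (the file names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	// Illustrative files; MemMapFs creates parent directories implicitly.
	afero.WriteFile(fs, "data/a.txt", []byte("a"), 0644)
	afero.WriteFile(fs, "data/b.txt", []byte("b"), 0644)
	afero.WriteFile(fs, "data/notes.md", []byte("n"), 0644)

	matches, err := afero.Glob(fs, "data/*.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // expected to contain data/a.txt and data/b.txt
}
```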
diff --git a/vendor/github.com/spf13/afero/match_test.go b/vendor/github.com/spf13/afero/match_test.go
new file mode 100644
index 000000000..21e1faecd
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match_test.go
@@ -0,0 +1,183 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+// contains returns true if vector contains the string s.
+func contains(vector []string, s string) bool {
+ for _, elem := range vector {
+ if elem == s {
+ return true
+ }
+ }
+ return false
+}
+
+func setupGlobDirRoot(t *testing.T, fs Fs) string {
+ path := testDir(fs)
+ setupGlobFiles(t, fs, path)
+ return path
+}
+
+func setupGlobDirReusePath(t *testing.T, fs Fs, path string) string {
+ testRegistry[fs] = append(testRegistry[fs], path)
+ return setupGlobFiles(t, fs, path)
+}
+
+func setupGlobFiles(t *testing.T, fs Fs, path string) string {
+ testSubDir := filepath.Join(path, "globs", "bobs")
+ err := fs.MkdirAll(testSubDir, 0700)
+ if err != nil && !os.IsExist(err) {
+ t.Fatal(err)
+ }
+
+ f, err := fs.Create(filepath.Join(testSubDir, "/matcher"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 1 content")
+ f.Close()
+
+ f, err = fs.Create(filepath.Join(testSubDir, "/../submatcher"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 2 content")
+ f.Close()
+
+ f, err = fs.Create(filepath.Join(testSubDir, "/../../match"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 3 content")
+ f.Close()
+
+ return testSubDir
+}
+
+func TestGlob(t *testing.T) {
+ defer removeAllTestFiles(t)
+ var testDir string
+ for i, fs := range Fss {
+ if i == 0 {
+ testDir = setupGlobDirRoot(t, fs)
+ } else {
+ setupGlobDirReusePath(t, fs, testDir)
+ }
+ }
+
+ var globTests = []struct {
+ pattern, result string
+ }{
+ {testDir + "/globs/bobs/matcher", testDir + "/globs/bobs/matcher"},
+ {testDir + "/globs/*/mat?her", testDir + "/globs/bobs/matcher"},
+ {testDir + "/globs/bobs/../*", testDir + "/globs/submatcher"},
+ {testDir + "/match", testDir + "/match"},
+ }
+
+ for _, fs := range Fss {
+
+ for _, tt := range globTests {
+ pattern := tt.pattern
+ result := tt.result
+ if runtime.GOOS == "windows" {
+ pattern = filepath.Clean(pattern)
+ result = filepath.Clean(result)
+ }
+ matches, err := Glob(fs, pattern)
+ if err != nil {
+ t.Errorf("Glob error for %q: %s", pattern, err)
+ continue
+ }
+ if !contains(matches, result) {
+ t.Errorf("Glob(%#q) = %#v want %v", pattern, matches, result)
+ }
+ }
+ for _, pattern := range []string{"no_match", "../*/no_match"} {
+ matches, err := Glob(fs, pattern)
+ if err != nil {
+ t.Errorf("Glob error for %q: %s", pattern, err)
+ continue
+ }
+ if len(matches) != 0 {
+ t.Errorf("Glob(%#q) = %#v want []", pattern, matches)
+ }
+ }
+
+ }
+}
+
+func TestGlobSymlink(t *testing.T) {
+ defer removeAllTestFiles(t)
+
+ fs := &OsFs{}
+ testDir := setupGlobDirRoot(t, fs)
+
+ err := os.Symlink("target", filepath.Join(testDir, "symlink"))
+ if err != nil {
+ t.Skipf("skipping on %s", runtime.GOOS)
+ }
+
+ var globSymlinkTests = []struct {
+ path, dest string
+ brokenLink bool
+ }{
+ {"test1", "link1", false},
+ {"test2", "link2", true},
+ }
+
+ for _, tt := range globSymlinkTests {
+ path := filepath.Join(testDir, tt.path)
+ dest := filepath.Join(testDir, tt.dest)
+ f, err := fs.Create(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ err = os.Symlink(path, dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tt.brokenLink {
+ // Break the symlink.
+ fs.Remove(path)
+ }
+ matches, err := Glob(fs, dest)
+ if err != nil {
+ t.Errorf("GlobSymlink error for %q: %s", dest, err)
+ }
+ if !contains(matches, dest) {
+ t.Errorf("Glob(%#q) = %#v want %v", dest, matches, dest)
+ }
+ }
+}
+
+
+func TestGlobError(t *testing.T) {
+ for _, fs := range Fss {
+ _, err := Glob(fs, "[7]")
+ if err != nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index 767ac1d5f..14cd438fb 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -66,7 +66,10 @@ func (m *MemMapFs) unRegisterWithParent(fileName string) error {
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
+
+ parent.Lock()
mem.RemoveFromMemDir(parent, f)
+ parent.Unlock()
return nil
}
@@ -99,8 +102,10 @@ func (m *MemMapFs) registerWithParent(f *mem.FileData) {
}
}
+ parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
+ parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
diff --git a/vendor/github.com/spf13/afero/memmap_test.go b/vendor/github.com/spf13/afero/memmap_test.go
index ca5abbca6..d28e91220 100644
--- a/vendor/github.com/spf13/afero/memmap_test.go
+++ b/vendor/github.com/spf13/afero/memmap_test.go
@@ -1,6 +1,7 @@
package afero
import (
+ "fmt"
"os"
"path/filepath"
"runtime"
@@ -343,3 +344,43 @@ func TestRacingDeleteAndClose(t *testing.T) {
}()
close(in)
}
+
+// This test should be run with the race detector on:
+// go test -run TestMemFsDataRace -race
+func TestMemFsDataRace(t *testing.T) {
+ const dir = "test_dir"
+ fs := NewMemMapFs()
+
+ if err := fs.MkdirAll(dir, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ const n = 1000
+ done := make(chan struct{})
+
+ go func() {
+ defer close(done)
+ for i := 0; i < n; i++ {
+ fname := filepath.Join(dir, fmt.Sprintf("%d.txt", i))
+ if err := WriteFile(fs, fname, []byte(""), 0777); err != nil {
+ panic(err)
+ }
+ if err := fs.Remove(fname); err != nil {
+ panic(err)
+ }
+ }
+ }()
+
+loop:
+ for {
+ select {
+ case <-done:
+ break loop
+ default:
+ _, err := ReadDir(fs, dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index a38137b25..da9aa881e 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -117,14 +117,6 @@ Flag functionality is provided by the [pflag
library](https://github.com/spf13/pflag), a fork of the flag standard library
which maintains the same interface while adding POSIX compliance.
-## Usage
-
-Cobra works by creating a set of commands and then organizing them into a tree.
-The tree defines the structure of the application.
-
-Once each command is defined with its corresponding flags, then the
-tree is assigned to the commander which is finally executed.
-
# Installing
Using Cobra is easy. First, use `go get` to install the latest version
of the library. This command will install the `cobra` generator executable
@@ -159,17 +151,17 @@ In a Cobra app, typically the main.go file is very bare. It serves one purpose:
package main
import (
- "fmt"
- "os"
+ "fmt"
+ "os"
- "{pathToYourApp}/cmd"
+ "{pathToYourApp}/cmd"
)
func main() {
- if err := cmd.RootCmd.Execute(); err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
+ if err := cmd.RootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
}
```
@@ -285,14 +277,14 @@ Ideally you place this in app/cmd/root.go:
```go
var RootCmd = &cobra.Command{
- Use: "hugo",
- Short: "Hugo is a very fast static site generator",
- Long: `A Fast and Flexible Static Site Generator built with
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
Complete documentation is available at http://hugo.spf13.com`,
- Run: func(cmd *cobra.Command, args []string) {
- // Do Stuff Here
- },
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
}
```
@@ -302,54 +294,54 @@ For example cmd/root.go:
```go
import (
- "fmt"
- "os"
+ "fmt"
+ "os"
- homedir "github.com/mitchellh/go-homedir"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
)
func init() {
- cobra.OnInitialize(initConfig)
- RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
- RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
- RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
- RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
- RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
- viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
- viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase"))
- viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper"))
- viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
- viper.SetDefault("license", "apache")
+ cobra.OnInitialize(initConfig)
+ RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+ RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+ RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+ RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+ viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase"))
+ viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+ viper.SetDefault("license", "apache")
}
func Execute() {
- RootCmd.Execute()
+ RootCmd.Execute()
}
func initConfig() {
// Don't forget to read config either from cfgFile or from home directory!
- if cfgFile != "" {
- // Use config file from the flag.
- viper.SetConfigFile(cfgFile)
- } else {
- // Find home directory.
- home, err := homedir.Dir()
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-
- // Search config in home directory with name ".cobra" (without extension).
- viper.AddConfigPath(home)
- viper.SetConfigName(".cobra")
- }
-
- if err := viper.ReadInConfig(); err != nil {
- fmt.Println("Can't read config:", err)
- os.Exit(1)
- }
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ if err := viper.ReadInConfig(); err != nil {
+ fmt.Println("Can't read config:", err)
+ os.Exit(1)
+ }
}
```
@@ -364,17 +356,17 @@ In a Cobra app, typically the main.go file is very bare. It serves, one purpose,
package main
import (
- "fmt"
- "os"
+ "fmt"
+ "os"
- "{pathToYourApp}/cmd"
+ "{pathToYourApp}/cmd"
)
func main() {
- if err := cmd.RootCmd.Execute(); err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
+ if err := cmd.RootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
}
```
@@ -390,21 +382,21 @@ populate it with the following:
package cmd
import (
- "github.com/spf13/cobra"
- "fmt"
+ "github.com/spf13/cobra"
+ "fmt"
)
func init() {
- RootCmd.AddCommand(versionCmd)
+ RootCmd.AddCommand(versionCmd)
}
var versionCmd = &cobra.Command{
- Use: "version",
- Short: "Print the version number of Hugo",
- Long: `All software has versions. This is Hugo's`,
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
- },
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
}
```
@@ -419,19 +411,6 @@ root, but commands can be attached at any level.
RootCmd.AddCommand(versionCmd)
```
-### Remove a command from its parent
-
-Removing a command is not a common action in simple programs, but it allows 3rd
-parties to customize an existing command tree.
-
-In this example, we remove the existing `VersionCmd` command of an existing
-root command, and we replace it with our own version:
-
-```go
-mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd)
-mainlib.RootCmd.AddCommand(versionCmd)
-```
-
## Working with Flags
Flags provide modifiers to control how the action command operates.
@@ -474,8 +453,8 @@ You can also bind your flags with [viper](https://github.com/spf13/viper):
var author string
func init() {
- RootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
- viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
+ RootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
}
```
@@ -487,32 +466,37 @@ More in [viper documentation](https://github.com/spf13/viper#working-with-flags)
## Positional and Custom Arguments
-Validation of positional arguments can be specified using the `Args` field.
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
-The follow validators are built in:
+The following validators are built in:
- `NoArgs` - the command will report an error if there are any positional args.
- `ArbitraryArgs` - the command will accept any args.
-- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the ValidArgs list.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
-A custom validator can be provided like this:
+An example of setting the custom validator:
```go
-
-Args: func validColorArgs(cmd *cobra.Command, args []string) error {
- if err := cli.RequiresMinArgs(1)(cmd, args); err != nil {
- return err
- }
- if myapp.IsValidColor(args[0]) {
- return nil
- }
- return fmt.Errorf("Invalid color specified: %s", args[0])
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires at least one arg")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
}
-
```
## Example
@@ -530,56 +514,56 @@ More documentation about flags is available at https://github.com/spf13/pflag
package main
import (
- "fmt"
- "strings"
+ "fmt"
+ "strings"
- "github.com/spf13/cobra"
+ "github.com/spf13/cobra"
)
func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
- var echoTimes int
-
- var cmdPrint = &cobra.Command{
- Use: "print [string to print]",
- Short: "Print anything to the screen",
- Long: `print is for printing anything back to the screen.
- For many years people have printed back to the screen.
- `,
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Print: " + strings.Join(args, " "))
- },
- }
-
- var cmdEcho = &cobra.Command{
- Use: "echo [string to echo]",
- Short: "Echo anything to the screen",
- Long: `echo is for echoing anything back.
- Echo works a lot like print, except it has a child command.
- `,
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Print: " + strings.Join(args, " "))
- },
- }
-
- var cmdTimes = &cobra.Command{
- Use: "times [# times] [string to echo]",
- Short: "Echo anything to the screen more times",
- Long: `echo things multiple times back to the user by providing
- a count and a string.`,
- Run: func(cmd *cobra.Command, args []string) {
- for i := 0; i < echoTimes; i++ {
- fmt.Println("Echo: " + strings.Join(args, " "))
- }
- },
- }
-
- cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
-
- var rootCmd = &cobra.Command{Use: "app"}
- rootCmd.AddCommand(cmdPrint, cmdEcho)
- cmdEcho.AddCommand(cmdTimes)
- rootCmd.Execute()
+ var cmdTimes = &cobra.Command{
+ Use: "times [# times] [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
}
```
@@ -665,16 +649,16 @@ The default help command is
```go
func (c *Command) initHelp() {
- if c.helpCommand == nil {
- c.helpCommand = &Command{
- Use: "help [command]",
- Short: "Help about any command",
- Long: `Help provides help for any command in the application.
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
Simply type ` + c.Name() + ` help [path to command] for full details.`,
- Run: c.HelpFunc(),
- }
- }
- c.AddCommand(c.helpCommand)
+ Run: c.HelpFunc(),
+ }
+ }
+ c.AddCommand(c.helpCommand)
}
```
@@ -682,9 +666,7 @@ You can provide your own command, function or template through the following met
```go
command.SetHelpCommand(cmd *Command)
-
command.SetHelpFunc(f func(*Command, []string))
-
command.SetHelpTemplate(s string)
```
@@ -750,8 +732,8 @@ The default usage function is:
```go
return func(c *Command) error {
- err := tmpl(c.Out(), c.UsageTemplate(), c)
- return err
+ err := tmpl(c.Out(), c.UsageTemplate(), c)
+ return err
}
```
@@ -779,57 +761,57 @@ An example of two commands which use all of these features is below. When the s
package main
import (
- "fmt"
+ "fmt"
- "github.com/spf13/cobra"
+ "github.com/spf13/cobra"
)
func main() {
- var rootCmd = &cobra.Command{
- Use: "root [sub]",
- Short: "My root command",
- PersistentPreRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
- },
- PreRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
- },
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd Run with args: %v\n", args)
- },
- PostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
- },
- PersistentPostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
- },
- }
-
- var subCmd = &cobra.Command{
- Use: "sub [no options!]",
- Short: "My subcommand",
- PreRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
- },
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd Run with args: %v\n", args)
- },
- PostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
- },
- PersistentPostRun: func(cmd *cobra.Command, args []string) {
- fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
- },
- }
-
- rootCmd.AddCommand(subCmd)
-
- rootCmd.SetArgs([]string{""})
- _ = rootCmd.Execute()
- fmt.Print("\n")
- rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
- _ = rootCmd.Execute()
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
}
```
@@ -855,28 +837,28 @@ command.
package main
import (
- "errors"
- "log"
+ "errors"
+ "log"
- "github.com/spf13/cobra"
+ "github.com/spf13/cobra"
)
func main() {
- var rootCmd = &cobra.Command{
- Use: "hugo",
- Short: "Hugo is a very fast static site generator",
- Long: `A Fast and Flexible Static Site Generator built with
- love by spf13 and friends in Go.
- Complete documentation is available at http://hugo.spf13.com`,
- RunE: func(cmd *cobra.Command, args []string) error {
- // Do Stuff Here
- return errors.New("some random error")
- },
- }
-
- if err := rootCmd.Execute(); err != nil {
- log.Fatal(err)
- }
+ var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+love by spf13 and friends in Go.
+Complete documentation is available at http://hugo.spf13.com`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // Do Stuff Here
+ return errors.New("some random error")
+ },
+ }
+
+ if err := rootCmd.Execute(); err != nil {
+ log.Fatal(err)
+ }
}
```
@@ -932,16 +914,6 @@ Cobra can generate a man page based on the subcommands, flags, etc. A simple exa
Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
-## Debugging
-
-Cobra provides a ‘DebugFlags’ method on a command which, when called, will print
-out everything Cobra knows about the flags for each command.
-
-### Example
-
-```go
-command.DebugFlags()
-```
## Extensions
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index e0cfb3494..e402065dd 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -463,14 +463,14 @@ func gen(buf *bytes.Buffer, cmd *Command) {
}
// GenBashCompletion generates bash completion file and writes to the passed writer.
-func (cmd *Command) GenBashCompletion(w io.Writer) error {
+func (c *Command) GenBashCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
- writePreamble(buf, cmd.Name())
- if len(cmd.BashCompletionFunction) > 0 {
- buf.WriteString(cmd.BashCompletionFunction + "\n")
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
}
- gen(buf, cmd)
- writePostscript(buf, cmd.Name())
+ gen(buf, c)
+ writePostscript(buf, c.Name())
_, err := buf.WriteTo(w)
return err
@@ -481,24 +481,24 @@ func nonCompletableFlag(flag *pflag.Flag) bool {
}
// GenBashCompletionFile generates bash completion file.
-func (cmd *Command) GenBashCompletionFile(filename string) error {
+func (c *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
- return cmd.GenBashCompletion(outFile)
+ return c.GenBashCompletion(outFile)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists.
-func (cmd *Command) MarkFlagRequired(name string) error {
- return MarkFlagRequired(cmd.Flags(), name)
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
}
// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists.
-func (cmd *Command) MarkPersistentFlagRequired(name string) error {
- return MarkFlagRequired(cmd.PersistentFlags(), name)
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists.
@@ -508,20 +508,20 @@ func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
-func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error {
- return MarkFlagFilename(cmd.Flags(), name, extensions...)
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// Generated bash autocompletion will call the bash function f for the flag.
-func (cmd *Command) MarkFlagCustom(name string, f string) error {
- return MarkFlagCustom(cmd.Flags(), name, f)
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
}
// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
-func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
- return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...)
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
}
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
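The renamed receivers above are the command-level helpers for completion metadata. As a rough, hypothetical sketch of how they fit together (the command, flag names, and output path below are invented for illustration, not taken from this repository):

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// Flags the generated completion script should know about (illustrative names).
	rootCmd.Flags().String("config", "", "config file")
	rootCmd.Flags().String("output", "", "output file")

	// Annotate the flags so the generated bash completion treats them specially.
	if err := rootCmd.MarkFlagRequired("config"); err != nil {
		log.Fatal(err)
	}
	if err := rootCmd.MarkFlagFilename("output", "json", "yaml"); err != nil {
		log.Fatal(err)
	}

	// Write the completion script to a file (path is illustrative).
	if err := rootCmd.GenBashCompletionFile("app_completion.sh"); err != nil {
		log.Fatal(err)
	}
}
```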
diff --git a/vendor/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go
index 071a6a2a2..a3b13a32a 100644
--- a/vendor/github.com/spf13/cobra/bash_completions_test.go
+++ b/vendor/github.com/spf13/cobra/bash_completions_test.go
@@ -118,7 +118,7 @@ func TestBashCompletions(t *testing.T) {
check(t, str, `flags_completion+=("_filedir")`)
// check for filename extension flags
check(t, str, `must_have_one_noun+=("three")`)
- // check for filename extention flags
+ // check for filename extension flags
check(t, str, `flags_completion+=("__handle_filename_extension_flag json|yaml|yml")`)
// check for custom flags
check(t, str, `flags_completion+=("__complete_custom")`)
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 4f65d7708..185e45263 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -54,13 +54,14 @@ type Command struct {
// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
ValidArgs []string
+ // Expected arguments
+ Args PositionalArgs
+
// ArgAliases is List of aliases for ValidArgs.
// These are not suggested to the user in the bash completion,
// but accepted if entered manually.
ArgAliases []string
- // Expected arguments
- Args PositionalArgs
// BashCompletionFunction is custom functions used by the bash autocompletion generator.
BashCompletionFunction string
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index b350aeeca..889c22e27 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -4,17 +4,29 @@ import (
"bytes"
"fmt"
"io"
+ "os"
"strings"
)
+// GenZshCompletionFile generates zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenZshCompletion(outFile)
+}
+
// GenZshCompletion generates a zsh completion file and writes to the passed writer.
-func (cmd *Command) GenZshCompletion(w io.Writer) error {
+func (c *Command) GenZshCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
- writeHeader(buf, cmd)
- maxDepth := maxDepth(cmd)
+ writeHeader(buf, c)
+ maxDepth := maxDepth(c)
writeLevelMapping(buf, maxDepth)
- writeLevelCases(buf, maxDepth, cmd)
+ writeLevelCases(buf, maxDepth, c)
_, err := buf.WriteTo(w)
return err
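`GenZshCompletionFile` is new in this change and mirrors `GenBashCompletionFile`: create the file, then delegate to `GenZshCompletion`. A minimal sketch of calling both, reusing the same illustrative `app` command as above (output names are placeholders):

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// Stream the zsh completion script to stdout...
	if err := rootCmd.GenZshCompletion(os.Stdout); err != nil {
		log.Fatal(err)
	}

	// ...or write it to a file using the helper added here (filename is illustrative).
	if err := rootCmd.GenZshCompletionFile("_app"); err != nil {
		log.Fatal(err)
	}
}
```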
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
index edeff3a86..ae5aaf711 100644
--- a/vendor/github.com/spf13/jwalterweatherman/notepad.go
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -148,7 +148,7 @@ func (n *Notepad) GetStdoutThreshold() Threshold {
}
// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
-// brackets at the begining of the line. An empty prefix won't be displayed at all.
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
func (n *Notepad) SetPrefix(prefix string) {
if len(prefix) != 0 {
n.prefix = "[" + prefix + "] "
diff --git a/vendor/github.com/xenolf/lego/CHANGELOG.md b/vendor/github.com/xenolf/lego/CHANGELOG.md
index 7dc1c1163..d71cc23dc 100644
--- a/vendor/github.com/xenolf/lego/CHANGELOG.md
+++ b/vendor/github.com/xenolf/lego/CHANGELOG.md
@@ -1,5 +1,16 @@
# Changelog
+## [0.4.1] - 2017-09-26
+
+### Added:
+- lib: A new DNS provider for OTC.
+- lib: The `AWS_HOSTED_ZONE_ID` environment variable for the Route53 DNS provider to directly specify the zone.
+- lib: The `RFC2136_TIMEOUT` environment variable to make the timeout for the RFC2136 provider configurable.
+- lib: The `GCE_SERVICE_ACCOUNT_FILE` environment variable to specify a service account file for the Google Cloud DNS provider.
+
+### Fixed:
+- lib: Fixed an authentication issue with the latest Azure SDK.
+
## [0.4.0] - 2017-07-13
### Added:
diff --git a/vendor/github.com/xenolf/lego/Dockerfile b/vendor/github.com/xenolf/lego/Dockerfile
index c03964076..511e403ce 100644
--- a/vendor/github.com/xenolf/lego/Dockerfile
+++ b/vendor/github.com/xenolf/lego/Dockerfile
@@ -1,14 +1,15 @@
-FROM alpine:3.4
+FROM alpine:3.6
ENV GOPATH /go
+ENV LEGO_VERSION tags/v0.4.1
-RUN apk update && apk add ca-certificates go git && \
- rm -rf /var/cache/apk/* && \
+RUN apk update && apk add --no-cache --virtual run-dependencies ca-certificates && \
+ apk add --no-cache --virtual build-dependencies go git musl-dev && \
go get -u github.com/xenolf/lego && \
- cd /go/src/github.com/xenolf/lego && \
+ cd ${GOPATH}/src/github.com/xenolf/lego && \
+ git checkout ${LEGO_VERSION} && \
go build -o /usr/bin/lego . && \
- apk del go git && \
- rm -rf /var/cache/apk/* && \
- rm -rf /go
+ apk del build-dependencies && \
+ rm -rf ${GOPATH}
ENTRYPOINT [ "/usr/bin/lego" ]
diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md
index 27b346b22..b9c374432 100644
--- a/vendor/github.com/xenolf/lego/README.md
+++ b/vendor/github.com/xenolf/lego/README.md
@@ -83,7 +83,7 @@ USAGE:
lego [global options] command [command options] [arguments...]
VERSION:
- 0.4.0
+ 0.4.1
COMMANDS:
run Register an account, then create and install a certificate
@@ -93,7 +93,7 @@ COMMANDS:
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
- --domains value, -d value Add domains to the process
+ --domains value, -d value Add a domain to the process. Can be specified multiple times.
--csr value, -c value Certificate signing request filename, if an external CSR is to be used
--server value, -s value CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client. (default: "https://acme-v01.api.letsencrypt.org/directory")
--email value, -m value Email used for registration and recovery contact.
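The reworded `--domains` help (here and in cli.go below) reflects that the flag is a string slice: each `-d` adds one domain. A hedged example invocation with placeholder values:

```
lego --email you@example.com -d example.com -d www.example.com run
```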
diff --git a/vendor/github.com/xenolf/lego/cli.go b/vendor/github.com/xenolf/lego/cli.go
index 2518d2e8f..3aac9e253 100644
--- a/vendor/github.com/xenolf/lego/cli.go
+++ b/vendor/github.com/xenolf/lego/cli.go
@@ -32,7 +32,7 @@ func main() {
app.Name = "lego"
app.Usage = "Let's Encrypt client written in Go"
- version := "0.4.0"
+ version := "0.4.1"
if strings.HasPrefix(gittag, "v") {
version = gittag
}
@@ -109,7 +109,7 @@ func main() {
app.Flags = []cli.Flag{
cli.StringSliceFlag{
Name: "domains, d",
- Usage: "Add domains to the process",
+ Usage: "Add a domain to the process. Can be specified multiple times.",
},
cli.StringFlag{
Name: "csr, c",
diff --git a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go
index 97d692382..04897aa18 100644
--- a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go
+++ b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go
@@ -125,11 +125,10 @@ func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) {
}
	// Now we want to go to Azure and get the zone.
- dc := dns.NewZonesClient(c.subscriptionId)
-
- rsc := dns.NewRecordSetsClient(c.subscriptionId)
spt, err := c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint)
- rsc.Authorizer = autorest.NewBearerAuthorizer(spt)
+
+ dc := dns.NewZonesClient(c.subscriptionId)
+ dc.Authorizer = autorest.NewBearerAuthorizer(spt)
zone, err := dc.Get(c.resourceGroup, acme.UnFqdn(authZone))
diff --git a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go
index 3b6b58d08..7b2fccc98 100644
--- a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go
+++ b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go
@@ -7,7 +7,7 @@ import (
"fmt"
"os"
- "github.com/pyr/egoscale/src/egoscale"
+ "github.com/exoscale/egoscale"
"github.com/xenolf/lego/acme"
)
diff --git a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go b/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go
index 2b106a27e..13daa8c8a 100644
--- a/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go
+++ b/vendor/github.com/xenolf/lego/providers/dns/rackspace/rackspace.go
@@ -269,7 +269,7 @@ func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawM
return r, nil
}
-// RackspaceRecords is the list of records sent/recieved from the DNS API
+// RackspaceRecords is the list of records sent/received from the DNS API
type RackspaceRecords struct {
RackspaceRecord []RackspaceRecord `json:"records"`
}