author     Harshavardhana <harsha@minio.io>    2016-10-26 05:21:07 -0700
committer  Christopher Speller <crspeller@gmail.com>    2016-10-26 08:21:07 -0400
commit     f02620b291b988848392c455a7719699f6b5c00f (patch)
tree       695e07607e86b000b9fe78e77df7f33673f1a755 /vendor/github.com
parent     b354d25d3731b53613489d95cfa4c946cf8e0888 (diff)
Moving away from goamz to use minio-go instead. (#4193)
minio-go provides a fully managed way of handling S3 API requests:
- Automatic bucket location management across all S3 regions.
- Transparent multipart uploads for files 64MB or larger.
- A GetObject() API that is compatible with the io.ReadSeeker interface.
- Various other APIs, including bulk deletes, server-side object copy, bucket policies, and bucket notifications.

Fixes #4182
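As a minimal sketch (not part of the commit), the following illustrates the usage pattern the new client enables, based on the vendored API added below. The endpoint and credentials are the public play.minio.io test values from the vendored README; the bucket and object names are placeholders.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// Public play.minio.io test credentials (from the vendored README).
	client, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	// GetObject returns a seekable reader; data is fetched lazily on
	// Read/Seek (see api-get-object.go below). Names are placeholders.
	obj, err := client.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// Seek past the first 16 bytes, then stream the rest to stdout.
	if _, err = obj.Seek(16, 0); err != nil {
		log.Fatalln(err)
	}
	if _, err = io.Copy(os.Stdout, obj); err != nil {
		log.Fatalln(err)
	}
}
```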
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/minio/minio-go/.gitignore | 2
-rw-r--r--  vendor/github.com/minio/minio-go/.travis.yml | 23
-rw-r--r--  vendor/github.com/minio/minio-go/CONTRIBUTING.md | 23
-rw-r--r--  vendor/github.com/minio/minio-go/LICENSE | 202
-rw-r--r--  vendor/github.com/minio/minio-go/MAINTAINERS.md | 19
-rw-r--r--  vendor/github.com/minio/minio-go/README.md | 250
-rw-r--r--  vendor/github.com/minio/minio-go/api-datatypes.go | 76
-rw-r--r--  vendor/github.com/minio/minio-go/api-error-response.go | 235
-rw-r--r--  vendor/github.com/minio/minio-go/api-error-response_test.go | 263
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object-file.go | 104
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object.go | 643
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-policy.go | 95
-rw-r--r--  vendor/github.com/minio/minio-go/api-list.go | 698
-rw-r--r--  vendor/github.com/minio/minio-go/api-notification.go | 215
-rw-r--r--  vendor/github.com/minio/minio-go/api-presigned.go | 177
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket.go | 294
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket_test.go | 274
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-common.go | 225
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-copy.go | 68
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-file.go | 307
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go | 393
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-progress.go | 108
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-readat.go | 246
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object.go | 315
-rw-r--r--  vendor/github.com/minio/minio-go/api-remove.go | 273
-rw-r--r--  vendor/github.com/minio/minio-go/api-s3-datatypes.go | 243
-rw-r--r--  vendor/github.com/minio/minio-go/api-stat.go | 120
-rw-r--r--  vendor/github.com/minio/minio-go/api.go | 680
-rw-r--r--  vendor/github.com/minio/minio-go/api_functional_v2_test.go | 1293
-rw-r--r--  vendor/github.com/minio/minio-go/api_functional_v4_test.go | 2044
-rw-r--r--  vendor/github.com/minio/minio-go/api_unit_test.go | 394
-rw-r--r--  vendor/github.com/minio/minio-go/appveyor.yml | 37
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache.go | 197
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache_test.go | 323
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-notification.go | 228
-rw-r--r--  vendor/github.com/minio/minio-go/constants.go | 46
-rw-r--r--  vendor/github.com/minio/minio-go/copy-conditions.go | 97
-rw-r--r--  vendor/github.com/minio/minio-go/docs/API.md | 1101
-rw-r--r--  vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go | 59
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/bucketexists.go | 51
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/copyobject.go | 67
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fgetobject.go | 45
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fputobject.go | 45
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go | 55
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go | 55
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getobject.go | 63
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go | 56
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listbuckets.go | 48
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go | 57
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go | 76
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listobjects.go | 57
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go | 57
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/makebucket.go | 46
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go | 53
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go | 59
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go | 47
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go | 64
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject.go | 53
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go | 49
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removebucket.go | 48
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go | 46
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeobject.go | 45
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeobjects.go | 61
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go | 85
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go | 54
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/statobject.go | 45
-rw-r--r--  vendor/github.com/minio/minio-go/hook-reader.go | 70
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go | 115
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go | 289
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go | 635
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go | 1822
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/set/stringset.go | 196
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/set/stringset_test.go | 322
-rw-r--r--  vendor/github.com/minio/minio-go/post-policy.go | 191
-rw-r--r--  vendor/github.com/minio/minio-go/request-signature-v2.go | 322
-rw-r--r--  vendor/github.com/minio/minio-go/request-signature-v2_test.go | 35
-rw-r--r--  vendor/github.com/minio/minio-go/request-signature-v4.go | 303
-rw-r--r--  vendor/github.com/minio/minio-go/retry.go | 138
-rw-r--r--  vendor/github.com/minio/minio-go/s3-endpoints.go | 44
-rw-r--r--  vendor/github.com/minio/minio-go/signature-type.go | 37
-rw-r--r--  vendor/github.com/minio/minio-go/tempfile.go | 60
-rw-r--r--  vendor/github.com/minio/minio-go/test-utils_test.go | 64
-rw-r--r--  vendor/github.com/minio/minio-go/utils.go | 383
-rw-r--r--  vendor/github.com/minio/minio-go/utils_test.go | 436
-rw-r--r--  vendor/github.com/vaughan0/go-ini/LICENSE | 14
-rw-r--r--  vendor/github.com/vaughan0/go-ini/README.md | 70
-rw-r--r--  vendor/github.com/vaughan0/go-ini/ini.go | 123
-rw-r--r--  vendor/github.com/vaughan0/go-ini/ini_linux_test.go | 43
-rw-r--r--  vendor/github.com/vaughan0/go-ini/ini_test.go | 89
-rw-r--r--  vendor/github.com/vaughan0/go-ini/test.ini | 2
90 files changed, 19339 insertions, 341 deletions
diff --git a/vendor/github.com/minio/minio-go/.gitignore b/vendor/github.com/minio/minio-go/.gitignore
new file mode 100644
index 000000000..acf19db3a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/.gitignore
@@ -0,0 +1,2 @@
+*~
+*.test
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
new file mode 100644
index 000000000..f61da45b6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/.travis.yml
@@ -0,0 +1,23 @@
+sudo: false
+language: go
+
+os:
+- linux
+- osx
+
+env:
+- ARCH=x86_64
+- ARCH=i686
+
+go:
+- 1.5.3
+- 1.6
+
+script:
+- diff -au <(gofmt -d .) <(printf "")
+- go vet ./...
+- go test -short -race -v ./...
+
+notifications:
+ slack:
+ secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8=
diff --git a/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
new file mode 100644
index 000000000..8b1ee86c6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+ - Fork it
+ - Create your feature branch (git checkout -b my-new-feature)
+ - Commit your changes (git commit -am 'Add some feature')
+ - Push to the branch (git push origin my-new-feature)
+ - Create new Pull Request
+
+* When you're ready to create a pull request, be sure to:
+ - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+ - Run `go fmt`
+ - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
+ - Make sure `go test -race ./...` and `go build` complete.
+ NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set your credentials as the environment variables
+ ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests please use ``go test -short -race ./...``
+
+* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
+ - The `minio-go` project is strictly conformant with Golang style
+ - If you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/LICENSE b/vendor/github.com/minio/minio-go/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
new file mode 100644
index 000000000..6dbef6265
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -0,0 +1,19 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+
+Edit `libraryVersion` constant in `api.go`.
+
+```
+$ grep libraryVersion api.go
+ libraryVersion = "0.3.0"
+```
+
+```
+$ git tag 0.3.0
+$ git push --tags
+``` \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
new file mode 100644
index 000000000..16ed88685
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -0,0 +1,250 @@
+# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
+
+**Supported cloud storage providers:**
+
+- AWS Signature Version 4
+ - Amazon S3
+ - Minio
+
+
+- AWS Signature Version 2
+ - Google Cloud Storage (Compatibility Mode)
+ - Openstack Swift + Swift3 middleware
+ - Ceph Object Gateway
+ - Riak CS
+
+This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough of a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).
+
+
+## Download from Github
+
+```sh
+
+$ go get -u github.com/minio/minio-go
+
+```
+## Initialize Minio Client
+
+You need four items to connect to a Minio object storage server.
+
+
+
+| Parameter | Description|
+| :--- | :--- |
+| endpoint | URL to object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set this value to 'true' to enable secure (HTTPS) access. |
+
+
+```go
+
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("%v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+
+This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
+
+
+
+
+We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+
+#### FileUploader.go
+
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Make a new bucket called mymusic.
+ bucketName := "mymusic"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // Check to see if we already own this bucket (which happens if you run this twice)
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
+
+ // Upload the zip file
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
+
+ // Upload the zip file with FPutObject
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+#### Run FileUploader
+
+```sh
+
+$ go run FileUploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+$ mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+
+```
+
+## API Reference
+
+The full API Reference is available here.
+
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
+
+### API Reference : Bucket notification Operations
+
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
+
+### API Reference : Object Operations
+
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API Reference : Presigned Operations
+
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+## Full Examples
+
+#### Full Examples : Bucket Operations
+
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+#### Full Examples : Bucket policy Operations
+
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+#### Full Examples : Bucket notification Operations
+
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
+#### Full Examples : File Object Operations
+
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+
+#### Full Examples : Object Operations
+
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+#### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Golang Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
+
+## Contribute
+
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
new file mode 100644
index 000000000..0871b1cfb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-datatypes.go
@@ -0,0 +1,76 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
+ // The name of the bucket.
+ Name string `json:"name"`
+ // Date the bucket was created.
+ CreationDate time.Time `json:"creationDate"`
+}
+
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+ // An ETag is optionally set to the md5sum of an object. For multipart objects,
+ // the ETag is of the form MD5SUM-N, where MD5SUM is the md5sum of all the
+ // individual part md5sums concatenated into one string and N is the part count.
+ ETag string `json:"etag"`
+
+ Key string `json:"name"` // Name of the object
+ LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+ Size int64 `json:"size"` // Size in bytes of the object.
+ ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+
+ // Owner name.
+ Owner struct {
+ DisplayName string `json:"name"`
+ ID string `json:"id"`
+ } `json:"owner"`
+
+ // The class of storage used to store the object.
+ StorageClass string `json:"storageClass"`
+
+ // Error
+ Err error `json:"-"`
+}
+
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Initiator initiator
+ Owner owner
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass string
+
+ // Key of the object for which the multipart upload was initiated.
+ Key string
+
+ // Size in bytes of the object.
+ Size int64
+
+ // Upload ID that identifies the multipart upload.
+ UploadID string `xml:"UploadId"`
+
+ // Error
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
new file mode 100644
index 000000000..bcfad3761
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-error-response.go
@@ -0,0 +1,235 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "strconv"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>AccessDenied</Code>
+ <Message>Access Denied</Message>
+ <BucketName>bucketName</BucketName>
+ <Key>objectName</Key>
+ <RequestId>F19772218238A85A</RequestId>
+ <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse - the typed error returned by all API operations.
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+// import s3 "github.com/minio/minio-go"
+// ...
+// ...
+// reader, stat, err := s3.GetObject(...)
+// if err != nil {
+// resp := s3.ToErrorResponse(err)
+// }
+// ...
+func ToErrorResponse(err error) ErrorResponse {
+ switch err := err.(type) {
+ case ErrorResponse:
+ return err
+ default:
+ return ErrorResponse{}
+ }
+}
+
+// Error - Returns HTTP error string
+func (e ErrorResponse) Error() string {
+ return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+ reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return ErrInvalidArgument(msg)
+ }
+ var errResp ErrorResponse
+ err := xmlDecoder(resp.Body, &errResp)
+ // XML decoding failed (e.g. empty body), fall back to HTTP headers.
+ if err != nil {
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ if objectName == "" {
+ errResp = ErrorResponse{
+ Code: "NoSuchBucket",
+ Message: "The specified bucket does not exist.",
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ } else {
+ errResp = ErrorResponse{
+ Code: "NoSuchKey",
+ Message: "The specified key does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ case http.StatusForbidden:
+ errResp = ErrorResponse{
+ Code: "AccessDenied",
+ Message: "Access Denied.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ case http.StatusConflict:
+ errResp = ErrorResponse{
+ Code: "Conflict",
+ Message: "Bucket not empty.",
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ errResp = ErrorResponse{
+ Code: resp.Status,
+ Message: resp.Status,
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ }
+ return errResp
+}
+
+// ErrEntityTooLarge - Input size is larger than supported maximum.
+func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
+ return ErrorResponse{
+ Code: "EntityTooLarge",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrEntityTooSmall - Input size is smaller than supported minimum.
+func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+ return ErrorResponse{
+ Code: "EntityTooSmall",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrUnexpectedEOF - Unexpected end of file reached.
+func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
+ strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+ return ErrorResponse{
+ Code: "UnexpectedEOF",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrInvalidBucketName - Invalid bucket name response.
+func ErrInvalidBucketName(message string) error {
+ return ErrorResponse{
+ Code: "InvalidBucketName",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrInvalidObjectName - Invalid object name response.
+func ErrInvalidObjectName(message string) error {
+ return ErrorResponse{
+ Code: "NoSuchKey",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrInvalidObjectPrefix - Invalid object prefix response is
+// similar to object name response.
+var ErrInvalidObjectPrefix = ErrInvalidObjectName
+
+// ErrInvalidArgument - Invalid argument response.
+func ErrInvalidArgument(message string) error {
+ return ErrorResponse{
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrNoSuchBucketPolicy - No Such Bucket Policy response
+// The specified bucket does not have a bucket policy.
+func ErrNoSuchBucketPolicy(message string) error {
+ return ErrorResponse{
+ Code: "NoSuchBucketPolicy",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrAPINotSupported - API not supported response
+// The specified API call is not supported
+func ErrAPINotSupported(message string) error {
+ return ErrorResponse{
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api-error-response_test.go b/vendor/github.com/minio/minio-go/api-error-response_test.go
new file mode 100644
index 000000000..11f57165f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-error-response_test.go
@@ -0,0 +1,263 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "testing"
+)
+
+// Tests validate the Error generator function for http response with error.
+func TestHttpRespToErrorResponse(t *testing.T) {
+ // 'genAPIErrorResponse' generates ErrorResponse for given APIError.
+ // It provides an encodable populated response value.
+ genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse {
+ var errResp = ErrorResponse{}
+ errResp.Code = err.Code
+ errResp.Message = err.Description
+ errResp.BucketName = bucketName
+ return errResp
+ }
+
+ // Encodes the error response into XML format.
+ encodeErr := func(response interface{}) []byte {
+ var bytesBuffer bytes.Buffer
+ bytesBuffer.WriteString(xml.Header)
+ encode := xml.NewEncoder(&bytesBuffer)
+ encode.Encode(response)
+ return bytesBuffer.Bytes()
+ }
+
+ // `createAPIErrorResponse` Mocks XML error response from the server.
+ createAPIErrorResponse := func(APIErr APIError, bucketName string) *http.Response {
+ // generate error response.
+ // response body contains the XML error message.
+ resp := &http.Response{}
+ errorResponse := genAPIErrorResponse(APIErr, bucketName)
+ encodedErrorResponse := encodeErr(errorResponse)
+ // write Header.
+ resp.StatusCode = APIErr.HTTPStatusCode
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(encodedErrorResponse))
+
+ return resp
+ }
+
+ // 'genErrResponse' constructs an error response based on the http status code
+ genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse {
+ errResp := ErrorResponse{
+ Code: code,
+ Message: message,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ return errResp
+ }
+
+ // Generate invalid argument error.
+ genInvalidError := func(message string) error {
+ errResp := ErrorResponse{
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
+ }
+ return errResp
+ }
+
+ // Set common http response headers.
+ setCommonHeaders := func(resp *http.Response) *http.Response {
+ // set headers.
+ resp.Header = make(http.Header)
+ resp.Header.Set("x-amz-request-id", "xyz")
+ resp.Header.Set("x-amz-id-2", "abc")
+ resp.Header.Set("x-amz-bucket-region", "us-east-1")
+ return resp
+ }
+
+ // Generate http response with empty body.
+ // Set the StatusCode to the argument supplied.
+ // Sets common headers.
+ genEmptyBodyResponse := func(statusCode int) *http.Response {
+ resp := &http.Response{}
+ // set empty response body.
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte("")))
+ // set headers.
+ setCommonHeaders(resp)
+ // set status code.
+ resp.StatusCode = statusCode
+ return resp
+ }
+
+ // Decode XML error message from the http response body.
+ decodeXMLError := func(resp *http.Response, t *testing.T) error {
+ var errResp ErrorResponse
+ err := xmlDecoder(resp.Body, &errResp)
+ if err != nil {
+ t.Fatal("XML decoding of response body failed")
+ }
+ return errResp
+ }
+
+ // List of APIErrors used to generate/mock server side XML error response.
+ APIErrors := []APIError{
+ {
+ Code: "NoSuchBucketPolicy",
+ Description: "The specified bucket does not have a bucket policy.",
+ HTTPStatusCode: http.StatusNotFound,
+ },
+ }
+
+ // List of expected response.
+ // Used for asserting the actual response.
+ expectedErrResponse := []error{
+ genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."),
+ decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t),
+ genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
+ genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""),
+ }
+
+ // List of http response to be used as input.
+ inputResponses := []*http.Response{
+ nil,
+ createAPIErrorResponse(APIErrors[0], "minio-bucket"),
+ genEmptyBodyResponse(http.StatusNotFound),
+ genEmptyBodyResponse(http.StatusNotFound),
+ genEmptyBodyResponse(http.StatusForbidden),
+ genEmptyBodyResponse(http.StatusConflict),
+ genEmptyBodyResponse(http.StatusBadRequest),
+ }
+
+ testCases := []struct {
+ bucketName string
+ objectName string
+ inputHTTPResp *http.Response
+ // expected results.
+ expectedResult error
+ }{
+ {"minio-bucket", "", inputResponses[0], expectedErrResponse[0]},
+ {"minio-bucket", "", inputResponses[1], expectedErrResponse[1]},
+ {"minio-bucket", "", inputResponses[2], expectedErrResponse[2]},
+ {"minio-bucket", "Asia/", inputResponses[3], expectedErrResponse[3]},
+ {"minio-bucket", "", inputResponses[4], expectedErrResponse[4]},
+ {"minio-bucket", "", inputResponses[5], expectedErrResponse[5]},
+ }
+
+ for i, testCase := range testCases {
+ actualResult := httpRespToErrorResponse(testCase.inputHTTPResp, testCase.bucketName, testCase.objectName)
+ if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
+ t.Errorf("Test %d: Expected result to be '%+v', but instead got '%+v'", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Test validates 'ErrEntityTooLarge' error response.
+func TestErrEntityTooLarge(t *testing.T) {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", 1000000, 99999)
+ expectedResult := ErrorResponse{
+ Code: "EntityTooLarge",
+ Message: msg,
+ BucketName: "minio-bucket",
+ Key: "Asia/",
+ }
+ actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/")
+ if !reflect.DeepEqual(expectedResult, actualResult) {
+ t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ }
+}
+
+// Test validates 'ErrEntityTooSmall' error response.
+func TestErrEntityTooSmall(t *testing.T) {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", -1)
+ expectedResult := ErrorResponse{
+ Code: "EntityTooSmall",
+ Message: msg,
+ BucketName: "minio-bucket",
+ Key: "Asia/",
+ }
+ actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/")
+ if !reflect.DeepEqual(expectedResult, actualResult) {
+ t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ }
+}
+
+// Test validates 'ErrUnexpectedEOF' error response.
+func TestErrUnexpectedEOF(t *testing.T) {
+ msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
+ strconv.FormatInt(100, 10), strconv.FormatInt(101, 10))
+ expectedResult := ErrorResponse{
+ Code: "UnexpectedEOF",
+ Message: msg,
+ BucketName: "minio-bucket",
+ Key: "Asia/",
+ }
+ actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/")
+ if !reflect.DeepEqual(expectedResult, actualResult) {
+ t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ }
+}
+
+// Test validates 'ErrInvalidBucketName' error response.
+func TestErrInvalidBucketName(t *testing.T) {
+ expectedResult := ErrorResponse{
+ Code: "InvalidBucketName",
+ Message: "Invalid Bucket name",
+ RequestID: "minio",
+ }
+ actualResult := ErrInvalidBucketName("Invalid Bucket name")
+ if !reflect.DeepEqual(expectedResult, actualResult) {
+ t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ }
+}
+
+// Test validates 'ErrInvalidObjectName' error response.
+func TestErrInvalidObjectName(t *testing.T) {
+ expectedResult := ErrorResponse{
+ Code: "NoSuchKey",
+ Message: "Invalid Object Key",
+ RequestID: "minio",
+ }
+ actualResult := ErrInvalidObjectName("Invalid Object Key")
+ if !reflect.DeepEqual(expectedResult, actualResult) {
+ t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ }
+}
+
+// Test validates 'ErrInvalidArgument' response.
+func TestErrInvalidArgument(t *testing.T) {
+ expectedResult := ErrorResponse{
+ Code: "InvalidArgument",
+ Message: "Invalid Argument",
+ RequestID: "minio",
+ }
+ actualResult := ErrInvalidArgument("Invalid Argument")
+ if !reflect.DeepEqual(expectedResult, actualResult) {
+ t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
new file mode 100644
index 000000000..a38fc852a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -0,0 +1,104 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// FGetObject - download contents of an object to a local file.
+func (c Client) FGetObject(bucketName, objectName, filePath string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Verify if destination already exists.
+ st, err := os.Stat(filePath)
+ if err == nil {
+ // If the destination exists and is a directory.
+ if st.IsDir() {
+ return ErrInvalidArgument("fileName is a directory.")
+ }
+ }
+
+ // Proceed if the file does not exist. Return all other errors.
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ // Extract top level directory.
+ objectDir, _ := filepath.Split(filePath)
+ if objectDir != "" {
+ // Create any missing top level directories.
+ if err := os.MkdirAll(objectDir, 0700); err != nil {
+ return err
+ }
+ }
+
+ // Stat the object to gather its md5sum (ETag) and size.
+ objectStat, err := c.StatObject(bucketName, objectName)
+ if err != nil {
+ return err
+ }
+
+ // Write to a temporary file "fileName.<ETag>.part.minio" before saving.
+ filePartPath := filePath + objectStat.ETag + ".part.minio"
+
+ // If exists, open in append mode. If not create it as a part file.
+ filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ return err
+ }
+
+ // Issue Stat to get the current offset.
+ st, err = filePart.Stat()
+ if err != nil {
+ return err
+ }
+
+ // Seek to current position for incoming reader.
+ objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
+ if err != nil {
+ return err
+ }
+
+ // Write to the part file.
+ if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
+ return err
+ }
+
+ // Close the file before rename, this is specifically needed for Windows users.
+ if err = filePart.Close(); err != nil {
+ return err
+ }
+
+ // Safely completed. Now commit by renaming to actual filename.
+ if err = os.Rename(filePartPath, filePath); err != nil {
+ return err
+ }
+
+ // Return.
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
new file mode 100644
index 000000000..1f0dabb05
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -0,0 +1,643 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+)
+
+// GetObject - returns a seekable, readable object.
+func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+
+ var httpReader io.ReadCloser
+ var objectInfo ObjectInfo
+ var err error
+ // Create request channel.
+ reqCh := make(chan getRequest)
+ // Create response channel.
+ resCh := make(chan getResponse)
+ // Create done channel.
+ doneCh := make(chan struct{})
+
+ // This routine feeds partial object data as and when the caller reads.
+ go func() {
+ defer close(reqCh)
+ defer close(resCh)
+
+ // Loop through the incoming control messages and read data.
+ for {
+ select {
+ // When the done channel is closed exit our routine.
+ case <-doneCh:
+ // Close the http response body before returning.
+ // This ends the connection with the server.
+ if httpReader != nil {
+ httpReader.Close()
+ }
+ return
+
+ // Gather incoming request.
+ case req := <-reqCh:
+ // If this is the first request we may not need to do a getObject request yet.
+ if req.isFirstReq {
+ // First request is a Read/ReadAt.
+ if req.isReadOp {
+ // Differentiate between wanting the whole object and just a range.
+ if req.isReadAt {
+ // If this is a ReadAt request only get the specified range.
+ // Range is set with respect to the offset and length of the buffer requested.
+ // Do not set objectInfo from the first readAt request because it will not get
+ // the whole object.
+ httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+ } else {
+ // First request is a Read request.
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ }
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ // Read up to len(req.Buffer) bytes; a short read
+ // means we have reached EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ }
+ } else {
+ // First request is a Stat or Seek call.
+ // Only need to run a StatObject until an actual Read or ReadAt request comes through.
+ objectInfo, err = c.StatObject(bucketName, objectName)
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the go-routine.
+ return
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ }
+ } else if req.settingObjectInfo { // Request is just to get objectInfo.
+ objectInfo, err := c.StatObject(bucketName, objectName)
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Send back the objectInfo.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ } else {
+ // An offset change fetches the object again at the new offset.
+ // The httpReader may not be set yet if the first request was a
+ // Stat or Seek, so also check whether the object has been read
+ // before and only initialize a new reader when one is missing.
+ // All ReadAt requests open a new reader.
+ if req.DidOffsetChange || !req.beenRead {
+ if httpReader != nil {
+ // Close previously opened http reader.
+ httpReader.Close()
+ }
+ // If this request is a readAt only get the specified range.
+ if req.isReadAt {
+ // Range is set with respect to the offset and length of the buffer requested.
+ httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+ } else {
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ }
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ }
+
+ // Read up to len(req.Buffer) bytes; a short read
+ // means we have reached EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Reply back how much was read.
+ resCh <- getResponse{
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ objectInfo: objectInfo,
+ }
+ }
+ }
+ }
+ }()
+
+ // Create a newObject through the information sent back by reqCh.
+ return newObject(reqCh, resCh, doneCh), nil
+}
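+
+// A minimal usage sketch, assuming a configured Client `c` obtained from
+// minio.New(...); bucket and object names are placeholders:
+//
+//   obj, err := c.GetObject("mybucket", "myobject")
+//   if err != nil {
+//       return err
+//   }
+//   defer obj.Close()
+//   // Object implements io.Reader, io.ReaderAt and io.Seeker.
+//   if _, err = io.Copy(os.Stdout, obj); err != nil {
+//       return err
+//   }
+//
+// Note that no network request is issued until the first
+// Read/ReadAt/Seek/Stat call arrives on the request channel above.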
+
+// get request message container to communicate with internal
+// goroutine.
+type getRequest struct {
+ Buffer []byte
+ Offset int64 // readAt offset.
+ DidOffsetChange bool // Tracks the offset changes for Seek requests.
+ beenRead bool // Determines if this object has already been read.
+ isReadAt bool // Determines if this request is a request to a specific range.
+ isReadOp bool // Determines if this request is a Read or ReadAt request.
+ isFirstReq bool // Determines if this request is the first time an object is being accessed.
+ settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
+}
+
+// get response message container to reply back for the request.
+type getResponse struct {
+ Size int
+ Error error
+ didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
+ objectInfo ObjectInfo // Used for the first request.
+}
+
+// Object represents an open object. It implements Read, ReadAt,
+// Seek and Close on an HTTP stream.
+type Object struct {
+ // Mutex.
+ mutex *sync.Mutex
+
+ // User allocated and defined.
+ reqCh chan<- getRequest
+ resCh <-chan getResponse
+ doneCh chan<- struct{}
+ prevOffset int64
+ currOffset int64
+ objectInfo ObjectInfo
+
+ // Keeps track of closed call.
+ isClosed bool
+
+ // Keeps track of if this is the first call.
+ isStarted bool
+
+ // Previous error saved for future calls.
+ prevErr error
+
+ // Keeps track of if this object has been read yet.
+ beenRead bool
+
+ // Keeps track of if objectInfo has been set yet.
+ objectInfoSet bool
+}
+
+// doGetRequest - sends a request on reqCh and blocks for its response on resCh.
+// Returns back the size of the buffer read, if anything was read, as well
+// as any error encountered. For all first requests sent on the object
+// it is also responsible for sending back the objectInfo.
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
+ o.reqCh <- request
+ response := <-o.resCh
+ // This was the first request.
+ if !o.isStarted {
+ // The object has been operated on.
+ o.isStarted = true
+ }
+ // Set the objectInfo if the request was not readAt
+ // and it hasn't been set before.
+ if !o.objectInfoSet && !request.isReadAt {
+ o.objectInfo = response.objectInfo
+ o.objectInfoSet = true
+ }
+ // Set beenRead only if it has not been set before.
+ if !o.beenRead {
+ o.beenRead = response.didRead
+ }
+ // Return any error to the top level.
+ if response.Error != nil {
+ return response, response.Error
+ }
+ return response, nil
+}
+
+// setOffset - handles the setting of offsets for
+// Read/ReadAt/Seek requests.
+func (o *Object) setOffset(bytesRead int64) error {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ // Save the current offset as previous offset.
+ o.prevOffset = o.currOffset
+
+ if o.currOffset >= o.objectInfo.Size {
+ return io.EOF
+ }
+ return nil
+}
+
+// Read reads up to len(p) bytes into p. It returns the number of
+// bytes read (0 <= n <= len(p)) and any error encountered. Returns
+// io.EOF upon end of file.
+func (o *Object) Read(b []byte) (n int, err error) {
+ if o == nil {
+ return 0, ErrInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // prevErr is previous error saved from previous operation.
+ if o.prevErr != nil || o.isClosed {
+ return 0, o.prevErr
+ }
+ // Create a new request.
+ readReq := getRequest{
+ isReadOp: true,
+ beenRead: o.beenRead,
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readReq.isFirstReq = true
+ }
+
+ // Verify whether the offset has changed since the last request,
+ // for example due to Seek().
+ offsetChange := o.prevOffset - o.currOffset
+ if offsetChange < 0 {
+ offsetChange = -offsetChange
+ }
+ if offsetChange > 0 {
+ // Fetch the new reader at the current offset again.
+ readReq.Offset = o.currOffset
+ readReq.DidOffsetChange = true
+ } else {
+ // No offset changes no need to fetch new reader, continue
+ // reading.
+ readReq.DidOffsetChange = false
+ readReq.Offset = 0
+ }
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readReq)
+ if err != nil && err != io.EOF {
+ // Save the error for future calls.
+ o.prevErr = err
+ return response.Size, err
+ }
+
+ // Bytes read.
+ bytesRead := int64(response.Size)
+
+ // Set the new offset.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ // Save the error for future calls.
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+
+ // Return the response.
+ return response.Size, err
+}
+
+// Stat returns the ObjectInfo structure describing object.
+func (o *Object) Stat() (ObjectInfo, error) {
+ if o == nil {
+ return ObjectInfo{}, ErrInvalidArgument("Object is nil")
+ }
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+ return ObjectInfo{}, o.prevErr
+ }
+
+ // This is the first request.
+ if !o.isStarted || !o.objectInfoSet {
+ statReq := getRequest{
+ isFirstReq: !o.isStarted,
+ settingObjectInfo: !o.objectInfoSet,
+ }
+
+ // Send the request and get the response.
+ _, err := o.doGetRequest(statReq)
+ if err != nil {
+ o.prevErr = err
+ return ObjectInfo{}, err
+ }
+ }
+
+ return o.objectInfo, nil
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset
+// off. It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b). At end of
+// file, that error is io.EOF.
+func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
+ if o == nil {
+ return 0, ErrInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // prevErr is error which was saved in previous operation.
+ if o.prevErr != nil || o.isClosed {
+ return 0, o.prevErr
+ }
+
+ // Can only compare offsets to size when size has been set.
+ if o.objectInfoSet {
+ // If offset is negative we return io.EOF.
+ // If offset is greater than or equal to the object size we return io.EOF.
+ if offset >= o.objectInfo.Size || offset < 0 {
+ return 0, io.EOF
+ }
+ }
+
+ // Create the new readAt request.
+ readAtReq := getRequest{
+ isReadOp: true,
+ isReadAt: true,
+ DidOffsetChange: true, // Offset always changes.
+ beenRead: o.beenRead, // Set if this is the first request to try and read.
+ Offset: offset, // Set the offset.
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readAtReq.isFirstReq = true
+ }
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readAtReq)
+ if err != nil && err != io.EOF {
+ // Save the error.
+ o.prevErr = err
+ return response.Size, err
+ }
+ // Bytes read.
+ bytesRead := int64(response.Size)
+ // There is no valid objectInfo yet
+ // to compare against for EOF.
+ if !o.objectInfoSet {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ // Save the current offset as previous offset.
+ o.prevOffset = o.currOffset
+ } else {
+ // If this was not the first request update
+ // the offsets and compare against objectInfo
+ // for EOF.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+ }
+ return response.Size, err
+}
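+
+// A sketch of reading a specific byte range with ReadAt, assuming `obj`
+// was returned by GetObject above; each ReadAt issues a fresh ranged
+// request against the server:
+//
+//   buf := make([]byte, 512)
+//   n, err := obj.ReadAt(buf, 1024) // read bytes 1024..1535
+//   if err != nil && err != io.EOF {
+//       return err
+//   }
+//   fmt.Printf("read %d bytes\n", n)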
+
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
+//
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal; subsequent io operations succeed as long as the
+// underlying object is not closed.
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
+ if o == nil {
+ return 0, ErrInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ if o.prevErr != nil {
+ // Seeking past EOF is legal, so tolerate a saved io.EOF; any other saved error is returned.
+ if o.prevErr != io.EOF {
+ return 0, o.prevErr
+ }
+ }
+
+ // Negative offset is valid for whence of '2'.
+ if offset < 0 && whence != 2 {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
+ }
+
+ // This is the first request. So before anything else
+ // get the ObjectInfo.
+ if !o.isStarted || !o.objectInfoSet {
+ // Create the new Seek request.
+ seekReq := getRequest{
+ isReadOp: false,
+ Offset: offset,
+ isFirstReq: true,
+ }
+ // Send and receive from the seek request.
+ _, err := o.doGetRequest(seekReq)
+ if err != nil {
+ // Save the error.
+ o.prevErr = err
+ return 0, err
+ }
+ }
+ // Save current offset as previous offset.
+ o.prevOffset = o.currOffset
+
+ // Switch through whence.
+ switch whence {
+ default:
+ return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
+ case 0:
+ if offset > o.objectInfo.Size {
+ return 0, io.EOF
+ }
+ o.currOffset = offset
+ case 1:
+ if o.currOffset+offset > o.objectInfo.Size {
+ return 0, io.EOF
+ }
+ o.currOffset += offset
+ case 2:
+ // Seeking to positive offset is valid for whence '2', but
+ // since we are backing a Reader we have reached 'EOF' if
+ // offset is positive.
+ if offset > 0 {
+ return 0, io.EOF
+ }
+ // Seeking to a negative resulting position is not allowed for whence '2'.
+ if o.objectInfo.Size+offset < 0 {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
+ }
+ o.currOffset = o.objectInfo.Size + offset
+ }
+ // Reset a saved io.EOF since the seek succeeded; let subsequent
+ // Read and ReadAt calls rediscover EOF.
+ if o.prevErr == io.EOF {
+ o.prevErr = nil
+ }
+ // Return the effective offset.
+ return o.currOffset, nil
+}
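+
+// A sketch of addressing from the end of the object with whence '2', e.g.
+// to fetch a fixed-size trailer, assuming `obj` from GetObject above:
+//
+//   if _, err := obj.Seek(-128, 2); err != nil {
+//       return err
+//   }
+//   tail := make([]byte, 128)
+//   if _, err := io.ReadFull(obj, tail); err != nil {
+//       return err
+//   }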
+
+// Close - closes the object. Calling Close more than once returns
+// an error for the subsequent calls.
+func (o *Object) Close() (err error) {
+ if o == nil {
+ return ErrInvalidArgument("Object is nil")
+ }
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // if already closed return an error.
+ if o.isClosed {
+ return o.prevErr
+ }
+
+ // Close successfully.
+ close(o.doneCh)
+
+ // Save for future operations.
+ errMsg := "Object is already closed. Bad file descriptor."
+ o.prevErr = errors.New(errMsg)
+ // Save here that we closed done channel successfully.
+ o.isClosed = true
+ return nil
+}
+
+// newObject instantiates a new Object.
+// ObjectInfo is populated lazily by the first request that fetches it.
+func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
+ return &Object{
+ mutex: &sync.Mutex{},
+ reqCh: reqCh,
+ resCh: resCh,
+ doneCh: doneCh,
+ }
+}
+
+// getObject - retrieve object from Object Storage.
+//
+// Additionally this function also takes range arguments to download the specified
+// range bytes of an object. Setting offset and length = 0 will download the full object.
+//
+// For more information about the HTTP Range header, see
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
+ // Validate input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, ObjectInfo{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return nil, ObjectInfo{}, err
+ }
+
+ customHeader := make(http.Header)
+ // Set ranges if length and offset are valid.
+ // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+ if length > 0 && offset >= 0 {
+ customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+ } else if offset > 0 && length == 0 {
+ customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+ } else if length < 0 && offset == 0 {
+ customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
+ }
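+ // For reference, the three Range forms produced above (RFC 7233):
+ //   offset=100, length=100  =>  Range: bytes=100-199  (a bounded range)
+ //   offset=100, length=0    =>  Range: bytes=100-     (from offset to the end)
+ //   offset=0,   length=-100 =>  Range: bytes=-100     (the last 100 bytes)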
+
+ // Execute GET on objectName.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ })
+ if err != nil {
+ return nil, ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+ return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Trim the surrounding double quotes from the ETag.
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ md5sum = strings.TrimSuffix(md5sum, "\"")
+
+ // Parse the date.
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ msg := "Last-Modified time format not recognized. " + reportIssue
+ return nil, ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: msg,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ // Get content-type.
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ var objectStat ObjectInfo
+ objectStat.ETag = md5sum
+ objectStat.Key = objectName
+ objectStat.Size = resp.ContentLength
+ objectStat.LastModified = date
+ objectStat.ContentType = contentType
+
+ // Do not close the body here; the caller is responsible for closing it.
+ return resp.Body, objectStat, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
new file mode 100644
index 000000000..07b1fa483
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -0,0 +1,95 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+// GetBucketPolicy - get bucket policy at a given path.
+func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
+}
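+
+// A usage sketch, assuming a configured Client `c`; the returned value is
+// one of the policy.BucketPolicy constants (e.g. policy.BucketPolicyNone):
+//
+//   pol, err := c.GetBucketPolicy("mybucket", "downloads/")
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println("policy at downloads/:", pol)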
+
+// ListBucketPolicies - list all policies applicable at a given prefix.
+func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return map[string]policy.BucketPolicy{}, err
+ }
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return map[string]policy.BucketPolicy{}, err
+ }
+ policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return map[string]policy.BucketPolicy{}, err
+ }
+ return policy.GetPolicies(policyInfo.Statements, bucketName), nil
+}
+
+// Request server for policy.
+func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute GET on bucket to fetch its policy.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return policy.BucketAccessPolicy{}, err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
+ return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil
+ }
+ return policy.BucketAccessPolicy{}, errResponse
+ }
+ }
+ bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return policy.BucketAccessPolicy{}, err
+ }
+
+ bucketPolicy := policy.BucketAccessPolicy{}
+ err = json.Unmarshal(bucketPolicyBuf, &bucketPolicy)
+ return bucketPolicy, err
+}
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
new file mode 100644
index 000000000..795de6183
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -0,0 +1,698 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// ListBuckets list all buckets owned by this authenticated user.
+//
+// This call requires explicit authentication, no anonymous requests are
+// allowed for listing buckets.
+//
+// api := client.New(....)
+// buckets, err := api.ListBuckets()
+// if err != nil {
+// fmt.Println(err)
+// return
+// }
+// for _, bucket := range buckets {
+// fmt.Println(bucket)
+// }
+//
+func (c Client) ListBuckets() ([]BucketInfo, error) {
+ // Execute GET on service.
+ resp, err := c.executeMethod("GET", requestMetadata{})
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, "", "")
+ }
+ }
+ listAllMyBucketsResult := listAllMyBucketsResult{}
+ err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
+ if err != nil {
+ return nil, err
+ }
+ return listAllMyBucketsResult.Buckets.Bucket, nil
+}
+
+/// Bucket Read Operations.
+
+// ListObjectsV2 lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it lists
+// all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+ // Return object owner information by default
+ fetchOwner := true
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+ // Validate incoming object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer close(objectStatCh)
+ // Save continuationToken for next request.
+ var continuationToken string
+ for {
+ // Get a list of objects, a maximum of 1000 per request.
+ result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
+ if err != nil {
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectInfo{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send object prefixes.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // If continuation token present, save it for next request.
+ if result.NextContinuationToken != "" {
+ continuationToken = result.NextContinuationToken
+ }
+
+ // Listing ends when the result is not truncated, return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?continuation-token - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (listBucketV2Result, error) {
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ return listBucketV2Result{}, err
+ }
+ // Validate object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return listBucketV2Result{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ // Always set list-type in ListObjects V2
+ urlValues.Set("list-type", "2")
+
+ // Set object prefix.
+ if objectPrefix != "" {
+ urlValues.Set("prefix", objectPrefix)
+ }
+ // Set continuation token
+ if continuationToken != "" {
+ urlValues.Set("continuation-token", continuationToken)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // Fetch owner when listing
+ if fetchOwner {
+ urlValues.Set("fetch-owner", "true")
+ }
+
+ // maxkeys should default to 1000 or less.
+ if maxkeys == 0 || maxkeys > 1000 {
+ maxkeys = 1000
+ }
+ // Set max keys.
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return listBucketV2Result{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Decode listObjectsV2 XML response.
+ listBucketResult := listBucketV2Result{}
+ err = xmlDecoder(resp.Body, &listBucketResult)
+ if err != nil {
+ return listBucketResult, err
+ }
+ return listBucketResult, nil
+}
+
+// ListObjects - (List Objects) - List some objects or all recursively.
+//
+// ListObjects lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it lists
+// all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+ // Validate incoming object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer close(objectStatCh)
+ // Save marker for next request.
+ var marker string
+ for {
+ // Get a list of objects, a maximum of 1000 per request.
+ result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
+ if err != nil {
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ // Save the marker.
+ marker = object.Key
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectInfo{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send object prefixes.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // If next marker present, save it for next request.
+ if result.NextMarker != "" {
+ marker = result.NextMarker
+ }
+
+ // Listing ends when the result is not truncated, return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ return listBucketResult{}, err
+ }
+ // Validate object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return listBucketResult{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ // Set object prefix.
+ if objectPrefix != "" {
+ urlValues.Set("prefix", objectPrefix)
+ }
+ // Set object marker.
+ if objectMarker != "" {
+ urlValues.Set("marker", objectMarker)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // maxkeys should default to 1000 or less.
+ if maxkeys == 0 || maxkeys > 1000 {
+ maxkeys = 1000
+ }
+ // Set max keys.
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return listBucketResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode listObjects XML response.
+ listBucketResult := listBucketResult{}
+ err = xmlDecoder(resp.Body, &listBucketResult)
+ if err != nil {
+ return listBucketResult, err
+ }
+ return listBucketResult, nil
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incompletely uploaded objects matching
+// the objectPrefix from the specified bucket. If recursion is enabled
+// it lists all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel to pro-actively close the internal go routine.
+// If you enable recursive as 'true' this function will return back all
+// the multipart objects in a given bucket name.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all incomplete uploads in 'mytestbucket'
+// recursive := true
+// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+ // Turn on size aggregation of individual parts.
+ isAggregateSize := true
+ return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+ // Allocate channel for multipart uploads.
+ objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+ // Delimiter is set to "/" by default.
+ delimiter := "/"
+ if recursive {
+ // If recursive do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ // Validate incoming object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+ defer close(objectMultipartStatCh)
+ // object and upload ID marker for future requests.
+ var objectMarker string
+ var uploadIDMarker string
+ for {
+ // list all multipart uploads.
+ result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return
+ }
+ // Save objectMarker and uploadIDMarker for next request.
+ objectMarker = result.NextKeyMarker
+ uploadIDMarker = result.NextUploadIDMarker
+ // Send all multipart uploads.
+ for _, obj := range result.Uploads {
+ // Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+ if aggregateSize {
+ // Get total multipart size.
+ obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ }
+ }
+ select {
+ // Send individual uploads here.
+ case objectMultipartStatCh <- obj:
+ // If done channel return here.
+ case <-doneCh:
+ return
+ }
+ }
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectMultipartInfo{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send delimited prefixes here.
+ case objectMultipartStatCh <- object:
+ // If done channel return here.
+ case <-doneCh:
+ return
+ }
+ }
+ // Listing ends if result not truncated, return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectMultipartStatCh)
+ // return.
+ return objectMultipartStatCh
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters. :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set uploads.
+ urlValues.Set("uploads", "")
+ // Set object key marker.
+ if keyMarker != "" {
+ urlValues.Set("key-marker", keyMarker)
+ }
+ // Set upload id marker.
+ if uploadIDMarker != "" {
+ urlValues.Set("upload-id-marker", uploadIDMarker)
+ }
+ // Set prefix marker.
+ if prefix != "" {
+ urlValues.Set("prefix", prefix)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // maxUploads should be 1000 or less.
+ if maxUploads == 0 || maxUploads > 1000 {
+ maxUploads = 1000
+ }
+ // Set max-uploads.
+ urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+
+ // Execute GET on bucketName to list multipart uploads.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return listMultipartUploadsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode response body.
+ listMultipartUploadsResult := listMultipartUploadsResult{}
+ err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+ return listMultipartUploadsResult, nil
+}
+
+// listObjectParts lists all uploaded object parts, paging through results as needed.
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) {
+ // Part number marker for the next batch of request.
+ var nextPartNumberMarker int
+ partsInfo = make(map[int]objectPart)
+ for {
+ // Get a list of uploaded parts, a maximum of 1000 per request.
+ listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+ if err != nil {
+ return nil, err
+ }
+ // Append to parts info.
+ for _, part := range listObjPartsResult.ObjectParts {
+ // Trim the surrounding double quotes from the ETag.
+ part.ETag = strings.TrimPrefix(part.ETag, "\"")
+ part.ETag = strings.TrimSuffix(part.ETag, "\"")
+ partsInfo[part.PartNumber] = part
+ }
+ // Keep part number marker, for the next iteration.
+ nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+ // Listing ends when the result is not truncated, return right here.
+ if !listObjPartsResult.IsTruncated {
+ break
+ }
+ }
+
+ // Return all the parts.
+ return partsInfo, nil
+}
+
+// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
+func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
+ // Make list incomplete uploads recursive.
+ isRecursive := true
+ // Turn off size aggregation of individual parts, in this request.
+ isAggregateSize := false
+ // latestUpload to track the latest multipart info for objectName.
+ var latestUpload ObjectMultipartInfo
+ // Create done channel to cleanup the routine.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+ // List all incomplete uploads.
+ for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
+ if mpUpload.Err != nil {
+ return "", mpUpload.Err
+ }
+ if objectName == mpUpload.Key {
+ if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
+ latestUpload = mpUpload
+ }
+ }
+ }
+ // Return the latest upload id.
+ return latestUpload.UploadID, nil
+}
+
+// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
+func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
+ // Iterate over all parts and aggregate the size.
+ partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ for _, partInfo := range partsInfo {
+ size += partInfo.Size
+ }
+ return size, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+// - lists some or all (up to 1000) parts that have been uploaded
+// for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number marker.
+ urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+
+ // maxParts should be 1000 or less.
+ if maxParts == 0 || maxParts > 1000 {
+ maxParts = 1000
+ }
+ // Set max parts.
+ urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+
+ // Execute GET on objectName to get list of parts.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return listObjectPartsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode list object parts XML.
+ listObjectPartsResult := listObjectPartsResult{}
+ err = xmlDecoder(resp.Body, &listObjectPartsResult)
+ if err != nil {
+ return listObjectPartsResult, err
+ }
+ return listObjectPartsResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
new file mode 100644
index 000000000..85e57805b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -0,0 +1,215 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bufio"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return BucketNotification{}, err
+ }
+ notification, err := c.getBucketNotification(bucketName)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return notification, nil
+}
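+
+// A usage sketch, assuming a configured Client `c`; the bucket name is a
+// placeholder:
+//
+//   notification, err := c.GetBucketNotification("mybucket")
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Printf("%+v\n", notification)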
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ // Execute GET on bucket to fetch its notification configuration.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return processBucketNotificationResponse(bucketName, resp)
+}
+
+// processBucketNotificationResponse - processes the GetBucketNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ return BucketNotification{}, errResponse
+ }
+ var bucketNotification BucketNotification
+ err := xmlDecoder(resp.Body, &bucketNotification)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return bucketNotification, nil
+}
+
+// identity represents the user ID; this is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct {
+ Key string `json:"key"`
+ Size int64 `json:"size,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ Sequencer string `json:"sequencer"`
+}
+
+// Notification event server specific metadata.
+type eventMeta struct {
+ SchemaVersion string `json:"s3SchemaVersion"`
+ ConfigurationID string `json:"configurationId"`
+ Bucket bucketMeta `json:"bucket"`
+ Object objectMeta `json:"object"`
+}
+
+// NotificationEvent represents an Amazon S3 bucket notification event.
+type NotificationEvent struct {
+ EventVersion string `json:"eventVersion"`
+ EventSource string `json:"eventSource"`
+ AwsRegion string `json:"awsRegion"`
+ EventTime string `json:"eventTime"`
+ EventName string `json:"eventName"`
+ UserIdentity identity `json:"userIdentity"`
+ RequestParameters map[string]string `json:"requestParameters"`
+ ResponseElements map[string]string `json:"responseElements"`
+ S3 eventMeta `json:"s3"`
+}
+
+// NotificationInfo - represents a collection of notification events; it
+// also reports any error encountered while listening on bucket notifications.
+type NotificationInfo struct {
+ Records []NotificationEvent
+ Err error
+}
+
+// ListenBucketNotification - listen on bucket notifications.
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
+ notificationInfoCh := make(chan NotificationInfo, 1)
+ // Start a goroutine to read the notification stream line by line.
+ go func(notificationInfoCh chan<- NotificationInfo) {
+ defer close(notificationInfoCh)
+
+ // Validate the bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Check the endpoint to verify whether listening for bucket notifications is supported.
+ if isAmazonEndpoint(c.endpointURL) || isGoogleEndpoint(c.endpointURL) {
+ notificationInfoCh <- NotificationInfo{
+ Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
+ }
+ return
+ }
+
+ // Continuously run and listen on bucket notifications.
+ for {
+ urlValues := make(url.Values)
+ urlValues.Set("prefix", prefix)
+ urlValues.Set("suffix", suffix)
+ urlValues["events"] = events
+
+ // Execute GET on bucket to listen for notifications.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Validate http response, upon error return quickly.
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ notificationInfoCh <- NotificationInfo{
+ Err: errResponse,
+ }
+ return
+ }
+
+ // Initialize a new bufio scanner, to read line by line.
+ bio := bufio.NewScanner(resp.Body)
+
+ // Close the response body.
+ defer resp.Body.Close()
+
+ // Unmarshal each line into a NotificationInfo.
+ for bio.Scan() {
+ var notificationInfo NotificationInfo
+ if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+ // Send notifications on channel only if there are events received.
+ if len(notificationInfo.Records) > 0 {
+ select {
+ case notificationInfoCh <- notificationInfo:
+ case <-doneCh:
+ return
+ }
+ }
+ }
+ // Look for any underlying errors.
+ if err = bio.Err(); err != nil {
+ // For an unexpected connection drop from server, we close the body
+ // and re-connect.
+ if err == io.ErrUnexpectedEOF {
+ resp.Body.Close()
+ continue
+ }
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+ }
+ }(notificationInfoCh)
+
+ // Returns the notification info channel, for caller to start reading from.
+ return notificationInfoCh
+}
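+
+// A usage sketch, assuming a configured Client `c` pointing at a Minio
+// server; bucket, prefix, suffix and event names are placeholders:
+//
+//   doneCh := make(chan struct{})
+//   defer close(doneCh)
+//   events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
+//   for info := range c.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
+//       if info.Err != nil {
+//           return info.Err
+//       }
+//       for _, record := range info.Records {
+//           fmt.Println(record.EventName, record.S3.Object.Key)
+//       }
+//   }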
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
new file mode 100644
index 000000000..200f33e9b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -0,0 +1,177 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "errors"
+ "net/url"
+ "time"
+)
+
+// supportedGetReqParams - supported request parameters for GET presigned request.
+var supportedGetReqParams = map[string]struct{}{
+ "response-expires": {},
+ "response-content-type": {},
+ "response-cache-control": {},
+ "response-content-language": {},
+ "response-content-encoding": {},
+ "response-content-disposition": {},
+}
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// Expires maximum is 7 days (i.e. 604800 seconds) and minimum is 1 second.
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ // Input validation.
+ if method == "" {
+ return nil, ErrInvalidArgument("method cannot be empty.")
+ }
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ if err := isValidExpiry(expires); err != nil {
+ return nil, err
+ }
+
+ // Convert expires into seconds.
+ expireSeconds := int64(expires / time.Second)
+ reqMetadata := requestMetadata{
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ }
+
+ // For "GET" we are handling additional request parameters to
+ // override its response headers.
+ if method == "GET" {
+ // Verify that the input map has no unsupported params; if it does, exit.
+ for k := range reqParams {
+ if _, ok := supportedGetReqParams[k]; !ok {
+ return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
+ }
+ }
+ // Save the request parameters to be used in presigning for GET request.
+ reqMetadata.queryValues = reqParams
+ }
+
+ // Instantiate a new request.
+ // Since expires is set newRequest will presign the request.
+ req, err := c.newRequest(method, reqMetadata)
+ if err != nil {
+ return nil, err
+ }
+ return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access an object
+// without credentials. Expires maximum is 7 days (i.e. 604800 seconds)
+// and minimum is 1 second. Additionally you can override a set of response
+// headers using the query parameters.
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL("GET", bucketName, objectName, expires, reqParams)
+}
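+
+// A usage sketch, assuming a configured Client `c`; the reqParams below use
+// one of the supported response-header overrides listed above:
+//
+//   reqParams := make(url.Values)
+//   reqParams.Set("response-content-disposition", "attachment; filename=\"report.pdf\"")
+//   presignedURL, err := c.PresignedGetObject("mybucket", "report.pdf", 24*time.Hour, reqParams)
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println(presignedURL)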
+
+// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
+// Expires maximum is 7 days (i.e. 604800 seconds) and minimum is 1 second.
+func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+ return c.presignURL("PUT", bucketName, objectName, expires, nil)
+}
+
+// PresignedPostPolicy - Returns the POST URL and the form data to upload an object.
+func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+ // Validate input arguments.
+ if p.expiration.IsZero() {
+ return nil, nil, errors.New("Expiration time must be specified")
+ }
+ if _, ok := p.formData["key"]; !ok {
+ return nil, nil, errors.New("object key must be specified")
+ }
+ if _, ok := p.formData["bucket"]; !ok {
+ return nil, nil, errors.New("bucket name must be specified")
+ }
+
+ bucketName := p.formData["bucket"]
+ // Fetch the bucket location.
+ location, err := c.getBucketLocation(bucketName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err = c.makeTargetURL(bucketName, "", location, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Keep time.
+ t := time.Now().UTC()
+ // For signature version '2' handle here.
+ if c.signature.isV2() {
+ policyBase64 := p.base64()
+ p.formData["policy"] = policyBase64
+ // For Google endpoint set this value to be 'GoogleAccessId'.
+ if isGoogleEndpoint(c.endpointURL) {
+ p.formData["GoogleAccessId"] = c.accessKeyID
+ } else {
+ // For all other endpoints set this value to be 'AWSAccessKeyId'.
+ p.formData["AWSAccessKeyId"] = c.accessKeyID
+ }
+ // Sign the policy.
+ p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
+ return u, p.formData, nil
+ }
+
+ // Add date policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-date",
+ value: t.Format(iso8601DateFormat),
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add algorithm policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-algorithm",
+ value: signV4Algorithm,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add a credential policy.
+ credential := getCredential(c.accessKeyID, location, t)
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-credential",
+ value: credential,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Get base64 encoded policy.
+ policyBase64 := p.base64()
+ // Fill in the form data.
+ p.formData["policy"] = policyBase64
+ p.formData["x-amz-algorithm"] = signV4Algorithm
+ p.formData["x-amz-credential"] = credential
+ p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+ p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+ return u, p.formData, nil
+}
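+
+// A usage sketch, assuming a configured Client `c` and this library's
+// PostPolicy helpers (NewPostPolicy, SetBucket, SetKey, SetExpires):
+//
+//   p := minio.NewPostPolicy()
+//   p.SetBucket("mybucket")
+//   p.SetKey("myobject")
+//   p.SetExpires(time.Now().UTC().Add(10 * time.Minute))
+//   u, formData, err := c.PresignedPostPolicy(p)
+//   if err != nil {
+//       return err
+//   }
+//   // POST formData as multipart/form-data fields to u, with the file
+//   // contents in the `file` field.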
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
new file mode 100644
index 000000000..3c9f438ef
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -0,0 +1,294 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+/// Bucket operations
+
+// MakeBucket creates a new bucket with bucketName.
+//
+// Location is an optional argument, by default all buckets are
+// created in US Standard Region.
+//
+// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
+func (c Client) MakeBucket(bucketName string, location string) error {
+ // Validate the input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If location is empty, treat it as the default region 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ }
+
+ // Instantiate the request.
+ req, err := c.makeBucketRequest(bucketName, location)
+ if err != nil {
+ return err
+ }
+
+ // Execute the request.
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Save the location into cache on a successful makeBucket response.
+ c.bucketLocCache.Set(bucketName, location)
+
+ // Return.
+ return nil
+}
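+
+// A hedged usage sketch (editor's note, not part of the vendored source):
+// assuming 'client' is an already constructed Client, creating a bucket in
+// a non-default region might look like:
+//
+//	if err := client.MakeBucket("my-bucket", "eu-central-1"); err != nil {
+//		log.Fatalln(err)
+//	}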
+
+// makeBucketRequest constructs request for makeBucket.
+func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
+ // Validate input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+
+ // In case of Amazon S3, a make bucket request issued on an already
+ // existing bucket would fail with an 'AuthorizationMalformed' error
+ // if virtual-host style is used. So we default to 'path style', as
+ // that is the preferred method here. The final location of the
+ // 'bucket' is provided through the XML LocationConstraint data with
+ // the request.
+ targetURL, err := url.Parse(c.endpointURL)
+ if err != nil {
+ return nil, err
+ }
+ targetURL.Path = "/" + bucketName + "/"
+
+ // get a new HTTP request for the method.
+ req, err := http.NewRequest("PUT", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ }
+
+ // If location is not 'us-east-1' create bucket location config.
+ if location != "us-east-1" && location != "" {
+ createBucketConfig := createBucketConfiguration{}
+ createBucketConfig.Location = location
+ var createBucketConfigBytes []byte
+ createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+ if err != nil {
+ return nil, err
+ }
+ createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
+ req.Body = ioutil.NopCloser(createBucketConfigBuffer)
+ req.ContentLength = int64(len(createBucketConfigBytes))
+ // Set content-md5.
+ req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
+ if c.signature.isV4() {
+ // Set sha256.
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
+ }
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ // Signature calculated for MakeBucket request should be for 'us-east-1',
+ // regardless of the bucket's location constraint.
+ req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+
+ // Return signed request.
+ return req, nil
+}
+
+// SetBucketPolicy sets the access permissions on an existing bucket.
+//
+// For example
+//
+// none - owner gets full access [default].
+// readonly - anonymous get access for everyone at a given object prefix.
+// readwrite - anonymous list/put/delete access to a given object prefix.
+// writeonly - anonymous put/delete access to a given object prefix.
+func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return err
+ }
+ if !bucketPolicy.IsValidBucketPolicy() {
+ return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
+ }
+ policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return err
+ }
+
+ if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
+ // As the request is to remove the policy and the bucket
+ // already has no policy statements, just return success.
+ return nil
+ }
+
+ policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)
+
+ // Save the updated policies.
+ return c.putBucketPolicy(bucketName, policyInfo)
+}
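+
+// A hedged usage sketch (editor's note, not part of the vendored source):
+// assuming 'client' is an already constructed Client, granting anonymous
+// read access to objects under a hypothetical "downloads" prefix might
+// look like:
+//
+//	err := client.SetBucketPolicy("my-bucket", "downloads", policy.BucketPolicyReadOnly)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}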
+
+// Saves a new bucket policy.
+func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If there are no policy statements, we should remove the entire policy.
+ if len(policyInfo.Statements) == 0 {
+ return c.removeBucketPolicy(bucketName)
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ policyBytes, err := json.Marshal(&policyInfo)
+ if err != nil {
+ return err
+ }
+
+ policyBuffer := bytes.NewReader(policyBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: policyBuffer,
+ contentLength: int64(len(policyBytes)),
+ contentMD5Bytes: sumMD5(policyBytes),
+ contentSHA256Bytes: sum256(policyBytes),
+ }
+
+ // Execute PUT to upload a new bucket policy.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// Removes all policies on a bucket.
+func (c Client) removeBucketPolicy(bucketName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetBucketNotification saves a new bucket notification.
+func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ notifBytes, err := xml.Marshal(bucketNotification)
+ if err != nil {
+ return err
+ }
+
+ notifBuffer := bytes.NewReader(notifBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Bytes: sumMD5(notifBytes),
+ contentSHA256Bytes: sum256(notifBytes),
+ }
+
+ // Execute PUT to upload a new bucket notification.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// RemoveAllBucketNotification - Removes the bucket notification, clearing all previously specified configuration.
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
+ return c.SetBucketNotification(bucketName, BucketNotification{})
+}
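+
+// A hedged usage sketch (editor's note, not part of the vendored source):
+// clearing every notification configuration on a bucket reduces to a
+// single call, since an empty BucketNotification removes all entries:
+//
+//	if err := client.RemoveAllBucketNotification("my-bucket"); err != nil {
+//		log.Fatalln(err)
+//	}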
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket_test.go b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
new file mode 100644
index 000000000..a1899fbe2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
@@ -0,0 +1,274 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+)
+
+// Tests validate the http request formulated for the creation of a bucket.
+func TestMakeBucketRequest(t *testing.T) {
+ // Generates expected http request for bucket creation.
+ // Used for asserting with the actual request generated.
+ createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
+
+ targetURL, err := url.Parse(c.endpointURL)
+ if err != nil {
+ return nil, err
+ }
+ targetURL.Path = "/" + bucketName + "/"
+
+ // get a new HTTP request for the method.
+ req, err = http.NewRequest("PUT", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ }
+
+ // If location is not 'us-east-1' create bucket location config.
+ if location != "us-east-1" && location != "" {
+ createBucketConfig := createBucketConfiguration{}
+ createBucketConfig.Location = location
+ var createBucketConfigBytes []byte
+ createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+ if err != nil {
+ return nil, err
+ }
+ createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
+ req.Body = ioutil.NopCloser(createBucketConfigBuffer)
+ req.ContentLength = int64(len(createBucketConfigBytes))
+ // Set content-md5.
+ req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
+ if c.signature.isV4() {
+ // Set sha256.
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
+ }
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ // Signature calculated for MakeBucket request should be for 'us-east-1',
+ // regardless of the bucket's location constraint.
+ req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+
+ // Return signed request.
+ return req, nil
+ }
+
+ // Get Request body.
+ getReqBody := func(reqBody io.ReadCloser) (string, error) {
+ contents, err := ioutil.ReadAll(reqBody)
+ if err != nil {
+ return "", err
+ }
+ return string(contents), nil
+ }
+
+ // Info for 'Client' creation.
+ // Will be used as arguments for 'NewClient'.
+ type infoForClient struct {
+ endPoint string
+ accessKey string
+ secretKey string
+ enableInsecure bool
+ }
+ // dataset for 'NewClient' call.
+ info := []infoForClient{
+ // endpoint localhost.
+ // both access-key and secret-key are empty.
+ {"localhost:9000", "", "", false},
+ // both access-key and secret-key exist.
+ {"localhost:9000", "my-access-key", "my-secret-key", false},
+ // one of access-key and secret-key is empty.
+ {"localhost:9000", "", "my-secret-key", false},
+
+ // endpoint amazon s3.
+ {"s3.amazonaws.com", "", "", false},
+ {"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
+ {"s3.amazonaws.com", "my-acess-key", "", false},
+
+ // endpoint google cloud storage.
+ {"storage.googleapis.com", "", "", false},
+ {"storage.googleapis.com", "my-access-key", "my-secret-key", false},
+ {"storage.googleapis.com", "", "my-secret-key", false},
+
+ // endpoint custom domain running Minio server.
+ {"play.minio.io", "", "", false},
+ {"play.minio.io", "my-access-key", "my-secret-key", false},
+ {"play.minio.io", "my-acess-key", "", false},
+ }
+
+ testCases := []struct {
+ bucketName string
+ location string
+ // data for new client creation.
+ info infoForClient
+ // error in the output.
+ err error
+ // flag indicating whether tests should pass.
+ shouldPass bool
+ }{
+ // Test cases with invalid bucket names.
+ {".mybucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket.", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket-", "", infoForClient{}, ErrInvalidBucketName("Bucket name contains invalid characters."), false},
+ {"my", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
+ {"", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be empty."), false},
+ {"my..bucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
+
+ // Test case with all valid values for S3 bucket location.
+ // Client is constructed using the info struct.
+ // case with empty location.
+ {"my-bucket", "", info[0], nil, true},
+ // case with location set to standard 'us-east-1'.
+ {"my-bucket", "us-east-1", info[0], nil, true},
+ // case with location set to a value different from 'us-east-1'.
+ {"my-bucket", "eu-central-1", info[0], nil, true},
+
+ {"my-bucket", "", info[1], nil, true},
+ {"my-bucket", "us-east-1", info[1], nil, true},
+ {"my-bucket", "eu-central-1", info[1], nil, true},
+
+ {"my-bucket", "", info[2], nil, true},
+ {"my-bucket", "us-east-1", info[2], nil, true},
+ {"my-bucket", "eu-central-1", info[2], nil, true},
+
+ {"my-bucket", "", info[3], nil, true},
+ {"my-bucket", "us-east-1", info[3], nil, true},
+ {"my-bucket", "eu-central-1", info[3], nil, true},
+
+ {"my-bucket", "", info[4], nil, true},
+ {"my-bucket", "us-east-1", info[4], nil, true},
+ {"my-bucket", "eu-central-1", info[4], nil, true},
+
+ {"my-bucket", "", info[5], nil, true},
+ {"my-bucket", "us-east-1", info[5], nil, true},
+ {"my-bucket", "eu-central-1", info[5], nil, true},
+
+ {"my-bucket", "", info[6], nil, true},
+ {"my-bucket", "us-east-1", info[6], nil, true},
+ {"my-bucket", "eu-central-1", info[6], nil, true},
+
+ {"my-bucket", "", info[7], nil, true},
+ {"my-bucket", "us-east-1", info[7], nil, true},
+ {"my-bucket", "eu-central-1", info[7], nil, true},
+
+ {"my-bucket", "", info[8], nil, true},
+ {"my-bucket", "us-east-1", info[8], nil, true},
+ {"my-bucket", "eu-central-1", info[8], nil, true},
+
+ {"my-bucket", "", info[9], nil, true},
+ {"my-bucket", "us-east-1", info[9], nil, true},
+ {"my-bucket", "eu-central-1", info[9], nil, true},
+
+ {"my-bucket", "", info[10], nil, true},
+ {"my-bucket", "us-east-1", info[10], nil, true},
+ {"my-bucket", "eu-central-1", info[10], nil, true},
+
+ {"my-bucket", "", info[11], nil, true},
+ {"my-bucket", "us-east-1", info[11], nil, true},
+ {"my-bucket", "eu-central-1", info[11], nil, true},
+ }
+
+ for i, testCase := range testCases {
+ // Cannot create a new client with an empty endPoint value;
+ // validate and create a new client only if the endPoint value is not empty.
+ client := &Client{}
+ var err error
+ if testCase.info.endPoint != "" {
+
+ client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
+ if err != nil {
+ t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
+ }
+ }
+
+ actualReq, err := client.makeBucketRequest(testCase.bucketName, testCase.location)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason?
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ expectedReq := &http.Request{}
+ expectedReq, err = createExpectedRequest(client, testCase.bucketName, testCase.location, expectedReq)
+ if err != nil {
+ t.Fatalf("Test %d: Expected request Creation failed", i+1)
+ }
+ if expectedReq.Method != actualReq.Method {
+ t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
+ }
+ if expectedReq.URL.String() != actualReq.URL.String() {
+ t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
+ }
+ if expectedReq.ContentLength != actualReq.ContentLength {
+ t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
+ }
+
+ if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
+ t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match with that of the actual request", i+1)
+ }
+ if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
+ t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\",but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
+ }
+
+ if testCase.location != "us-east-1" && testCase.location != "" {
+ expectedContent, err := getReqBody(expectedReq.Body)
+ if err != nil {
+ t.Fatalf("Test %d: Coudln't parse request body", i+1)
+ }
+ actualContent, err := getReqBody(actualReq.Body)
+ if err != nil {
+ t.Fatalf("Test %d: Coudln't parse request body", i+1)
+ }
+ if expectedContent != actualContent {
+ t.Errorf("Test %d: Expected request body doesn't match actual content body", i+1)
+ }
+ if expectedReq.Header.Get("Content-Md5") != actualReq.Header.Get("Content-Md5") {
+ t.Errorf("Test %d: Request body Md5 differs from the expected result", i+1)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
new file mode 100644
index 000000000..2eaef2e30
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -0,0 +1,225 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+)
+
+// Verify if reader is *os.File
+func isFile(reader io.Reader) (ok bool) {
+ _, ok = reader.(*os.File)
+ return
+}
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+ _, ok = reader.(*Object)
+ return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+ _, ok = reader.(io.ReaderAt)
+ return
+}
+
+// shouldUploadPart - verify if part should be uploaded.
+func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
+ // If the part is not found, it should be uploaded.
+ uploadedPart, found := objectParts[objPart.PartNumber]
+ if !found {
+ return true
+ }
+ // If the size mismatches, the part should be uploaded.
+ if objPart.Size != uploadedPart.Size {
+ return true
+ }
+ // If the md5sum mismatches, the part should be uploaded.
+ if objPart.ETag != uploadedPart.ETag {
+ return true
+ }
+ return false
+}
+
+// optimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+// maxPartsCount - 10000
+// minPartSize - 5MiB
+// maxMultipartPutObjectSize - 5TiB
+//
+func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+ // If the object size is '-1', set it to 5TiB.
+ if objectSize == -1 {
+ objectSize = maxMultipartPutObjectSize
+ }
+ // Fail if the object size is larger than the supported maximum.
+ if objectSize > maxMultipartPutObjectSize {
+ err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+ return
+ }
+ // Use floats for part size for all calculations to avoid
+ // overflows during float64 to int64 conversions.
+ partSizeFlt := math.Ceil(float64(objectSize) / float64(maxPartsCount))
+ partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
+ // Total parts count.
+ totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+ // Part size.
+ partSize = int64(partSizeFlt)
+ // Last part size.
+ lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+ return totalPartsCount, partSize, lastPartSize, nil
+}
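+
+// A quick worked example (editor's note, not part of the vendored source):
+// for a hypothetical 10GiB object, objectSize/maxPartsCount is roughly
+// 1MiB, which rounds up to a single minPartSize unit, so partSize is 5MiB,
+// totalPartsCount is 2048, and lastPartSize is also exactly 5MiB.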
+
+// hashCopyBuffer is identical to hashCopyN except that it doesn't take
+// a size argument but takes a buffer argument instead, and the reader
+// should implement the io.ReaderAt interface.
+//
+// It stages reads from offsets into the buffer; if the buffer is nil it
+// is initialized to optimalReadBufferSize.
+func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
+ hashWriter := writer
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Buffer is nil, initialize.
+ if buf == nil {
+ buf = make([]byte, optimalReadBufferSize)
+ }
+
+ // Offset to start reading from.
+ var readAtOffset int64
+
+ // The following block reads data at an offset from the input
+ // reader and copies it into the given writer.
+ for {
+ readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ return 0, rerr
+ }
+ }
+ writeSize, werr := hashWriter.Write(buf[:readAtSize])
+ if werr != nil {
+ return 0, werr
+ }
+ if readAtSize != writeSize {
+ return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
+ }
+ readAtOffset += int64(writeSize)
+ size += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, err
+}
+
+// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
+func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
+ hashWriter := writer
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Copy up to partSize bytes from reader into writer.
+ size, err = io.CopyN(hashWriter, reader, partSize)
+ if err != nil {
+ // If not EOF return error right here.
+ if err != io.EOF {
+ return 0, err
+ }
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, err
+}
+
+// getUploadID - fetch upload id if already present for an object name
+// or initiate a new request to fetch a new upload id.
+func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", false, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return "", false, err
+ }
+
+ // Set content Type to default if empty string.
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // Find upload id for previous upload for an object.
+ uploadID, err = c.findUploadID(bucketName, objectName)
+ if err != nil {
+ return "", false, err
+ }
+ if uploadID == "" {
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+ if err != nil {
+ return "", false, err
+ }
+ // Save the new upload id.
+ uploadID = initMultipartUploadResult.UploadID
+ // Indicate that this is a new upload id.
+ isNew = true
+ }
+ return uploadID, isNew, nil
+}
+
+// computeHash - Calculates hashes for an input read Seeker.
+func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
+ hashWriter := ioutil.Discard
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Since no buffer is provided, there is no need to allocate one; just use io.Copy.
+ size, err = io.Copy(hashWriter, reader)
+ if err != nil {
+ return 0, err
+ }
+
+ // Seek back reader to the beginning location.
+ if _, err := reader.Seek(0, 0); err != nil {
+ return 0, err
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
new file mode 100644
index 000000000..c7cd46d08
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -0,0 +1,68 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "net/http"
+
+// CopyObject - copy a source object into a new object with the provided name in the provided bucket
+func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ if objectSource == "" {
+ return ErrInvalidArgument("Object source cannot be empty.")
+ }
+
+ // Apply the custom headers from the copy conditions.
+ customHeaders := make(http.Header)
+ for _, cond := range cpCond.conditions {
+ customHeaders.Set(cond.key, cond.value)
+ }
+
+ // Set copy source.
+ customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
+
+ // Execute PUT on objectName.
+ resp, err := c.executeMethod("PUT", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeaders,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Decode copy response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return err
+ }
+
+ // Return nil on success.
+ return nil
+}
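+
+// A hedged usage sketch (editor's note, not part of the vendored source):
+// the source is passed as "/bucket/object" (it becomes the
+// x-amz-copy-source header), and a zero-value CopyConditions copies
+// unconditionally:
+//
+//	err := client.CopyObject("dst-bucket", "dst-object", "/src-bucket/src-object", minio.CopyConditions{})
+//	if err != nil {
+//		log.Fatalln(err)
+//	}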
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
new file mode 100644
index 000000000..deaed0acd
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -0,0 +1,307 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "mime"
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// FPutObject - Create an object in a bucket, with contents from file at filePath.
+func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Open the referenced file.
+ fileReader, err := os.Open(filePath)
+ // If any error fail quickly here.
+ if err != nil {
+ return 0, err
+ }
+ defer fileReader.Close()
+
+ // Save the file stat.
+ fileStat, err := fileReader.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+
+ // Check for largest object size allowed.
+ if fileSize > int64(maxMultipartPutObjectSize) {
+ return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
+ }
+
+ // Set contentType based on filepath extension if not given, or to the
+ // default value of "application/octet-stream" if the extension has no
+ // associated type.
+ if contentType == "" {
+ if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ }
+
+ // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+ // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+ if isGoogleEndpoint(c.endpointURL) {
+ if fileSize > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ }
+
+ // NOTE: S3 doesn't allow anonymous multipart requests.
+ if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if fileSize > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ // Do not compute MD5 for anonymous requests to Amazon
+ // S3. Uploads up to 5GiB in size.
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ }
+
+ // Small object upload is initiated for input data of size smaller than 5MiB.
+ if fileSize < minPartSize && fileSize >= 0 {
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ }
+ // Upload all large objects as multipart.
+ n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // Verify if multipart functionality is not available, if not
+ // fall back to single PutObject operation.
+ if errResp.Code == "NotImplemented" {
+ // If size of file is greater than '5GiB' fail.
+ if fileSize > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ }
+ return n, err
+ }
+ return n, nil
+}
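+
+// A hedged usage sketch (editor's note, not part of the vendored source):
+// uploading a local file with an explicit content type; FPutObject picks
+// single-PUT or multipart upload based on the file size:
+//
+//	n, err := client.FPutObject("my-bucket", "report.csv", "/tmp/report.csv", "text/csv")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Printf("uploaded %d bytes", n)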
+
+// putObjectMultipartFromFile - Creates object from contents of *os.File
+//
+// NOTE: This function is meant to be used for readers backed by a
+// local file, as in *os.File. It resumes an upload by skipping all
+// parts which were already uploaded, verifying them against the
+// MD5SUM of each individual part. It also effectively utilizes the
+// file system's capability of reading from specific sections, and so
+// does not have to create temporary files.
+func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Get upload id for an object, initiates a new multipart request
+ // if it cannot find any previously partially uploaded object.
+ uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ if err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to the server; should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // A map of all uploaded parts.
+ var partsInfo = make(map[int]objectPart)
+
+ // If this session is a continuation of a previous session fetch all
+ // previously uploaded parts info.
+ if !isNew {
+ // Fetch previously uploaded parts and maximum part size.
+ partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
+ if err != nil {
+ return 0, err
+ }
+
+ // Create a channel to communicate a part was uploaded.
+ // Buffer this to 10000, the maximum number of parts allowed by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+ // Create a channel to communicate which part to upload.
+ // Buffer this to 10000, the maximum number of parts allowed by S3.
+ uploadPartsCh := make(chan int, 10000)
+
+ // Just for readability.
+ lastPartNumber := totalPartsCount
+
+ // Send each part number through uploadPartsCh to be uploaded.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- p
+ }
+ close(uploadPartsCh)
+
+ // Use three 'workers' to upload parts in parallel.
+ for w := 1; w <= 3; w++ {
+ go func() {
+ // Deal with each part as it comes through the channel.
+ for partNumber := range uploadPartsCh {
+ // Add hash algorithms that need to be calculated by computeHash().
+ // In case of a non-v4 signature or an https connection, sha256 is not needed.
+ hashAlgos := make(map[string]hash.Hash)
+ hashSums := make(map[string][]byte)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ // Create the part to be uploaded.
+ verifyObjPart := objectPart{
+ ETag: hex.EncodeToString(hashSums["md5"]),
+ PartNumber: partNumber,
+ Size: partSize,
+ }
+ // If this is the last part do not give it the full part size.
+ if partNumber == lastPartNumber {
+ verifyObjPart.Size = lastPartSize
+ }
+
+ // Verify if part should be uploaded.
+ if shouldUploadPart(verifyObjPart, partsInfo) {
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
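+ // For example (editor's note): with a 5MiB partSize, part 3
+ // reads from offset (3-1)*5MiB = 10MiB.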
+ readOffset := int64(partNumber-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if partNumber == lastPartNumber {
+ readOffset = (fileSize - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
+ var prtSize int64
+ prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Proceed to upload the part.
+ var objPart objectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+ }
+ // Return through the channel the part size.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: verifyObjPart.Size,
+ PartNum: partNumber,
+ Error: nil,
+ }
+ }
+ }()
+ }
+
+ // Retrieve each uploaded part once it is done.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
+ }
+ // Retrieve each uploaded part and store it to be completed.
+ part, ok := partsInfo[uploadRes.PartNum]
+ if !ok {
+ return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+ }
+ // Update the total uploaded size.
+ totalUploadedSize += uploadRes.Size
+ // Update the progress bar if there is one.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
+ return totalUploadedSize, err
+ }
+ }
+ // Store the part to be completed.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Verify if we uploaded all data.
+ if totalUploadedSize != fileSize {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
new file mode 100644
index 000000000..cdd3f53c2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -0,0 +1,393 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Comprehensive put object operation involving multipart resumable uploads.
+//
+// The following code handles these types of readers:
+//
+// - *os.File
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+//
+// If we exhaust all the known types, the code proceeds to use the
+// stream as is, where each part is staged in memory, checksummed and
+// verified before upload.
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+ if size > 0 && size > minPartSize {
+ // Verify if reader is *os.File, then use file system functionalities.
+ if isFile(reader) {
+ return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
+ }
+ // Verify if reader is *minio.Object or io.ReaderAt.
+ // NOTE: Verification of object is kept for a specific purpose
+ // while it is going to be duck typed similar to io.ReaderAt.
+ // It is to indicate that *minio.Object implements io.ReaderAt.
+ // and such a functionality is used in the subsequent code
+ // path.
+ if isObject(reader) || isReadAt(reader) {
+ return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
+ }
+ }
+ // For any other data size and reader type we do a generic multipart
+ // approach by staging each part in a temporary buffer and uploading it.
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
+}
+
+// putObjectMultipartStream uploads files bigger than 5MiB, and also
+// supports the special case where the size is unknown, i.e. '-1'.
+func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to the server; should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // A map of all previously uploaded parts.
+ var partsInfo = make(map[int]objectPart)
+
+ // getUploadID for an object, initiates a new multipart request
+ // if it cannot find any previously partially uploaded object.
+ uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ if err != nil {
+ return 0, err
+ }
+
+ // If this session is a continuation of a previous session, fetch all
+ // previously uploaded parts info.
+ if !isNew {
+ // Fetch previously uploaded parts and maximum part size.
+ partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize a temporary buffer.
+ tmpBuffer := new(bytes.Buffer)
+
+ for partNumber <= totalPartsCount {
+ // Choose hash algorithms to be calculated by hashCopyN; avoid sha256
+ // with a non-v4 signature request or an HTTPS connection.
+ hashSums := make(map[string][]byte)
+ hashAlgos := make(map[string]hash.Hash)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ // Calculates hash sums while copying partSize bytes into tmpBuffer.
+ prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
+ if rErr != nil {
+ if rErr != io.EOF {
+ return 0, rErr
+ }
+ }
+
+ var reader io.Reader
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ reader = newHook(tmpBuffer, progress)
+
+ // Verify if part should be uploaded.
+ if shouldUploadPart(objectPart{
+ ETag: hex.EncodeToString(hashSums["md5"]),
+ PartNumber: partNumber,
+ Size: prtSize,
+ }, partsInfo) {
+ // Proceed to upload the part.
+ var objPart objectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ if err != nil {
+ // Reset the temporary buffer upon any error.
+ tmpBuffer.Reset()
+ return totalUploadedSize, err
+ }
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+ } else {
+ // Update the progress reader for the skipped part.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
+ return totalUploadedSize, err
+ }
+ }
+ }
+
+ // Reset the temporary buffer.
+ tmpBuffer.Reset()
+
+ // Save successfully uploaded size.
+ totalUploadedSize += prtSize
+
+ // Increment part number.
+ partNumber++
+
+ // For unknown size, we break away once we read an EOF.
+ // We do not have to upload up to totalPartsCount.
+ if size < 0 && rErr == io.EOF {
+ break
+ }
+ }
+
+ // Verify if we uploaded all the data.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
+func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploads", "")
+
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // Set ContentType header.
+ customHeader := make(http.Header)
+ customHeader.Set("Content-Type", contentType)
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ }
+
+ // Execute POST on an objectName to initiate multipart upload.
+ resp, err := c.executeMethod("POST", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode xml for new multipart upload.
+ initiateMultipartUploadResult := initiateMultipartUploadResult{}
+ err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
+ if err != nil {
+ return initiateMultipartUploadResult, err
+ }
+ return initiateMultipartUploadResult, nil
+}
+
+// uploadPart - Uploads a part in a multipart upload.
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return objectPart{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return objectPart{}, err
+ }
+ if size > maxPartSize {
+ return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
+ }
+ if size <= -1 {
+ return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
+ }
+ if partNumber <= 0 {
+ return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
+ }
+ if uploadID == "" {
+ return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
+ }
+
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number.
+ urlValues.Set("partNumber", strconv.Itoa(partNumber))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Bytes: md5Sum,
+ contentSHA256Bytes: sha256Sum,
+ }
+
+ // Execute PUT on each part.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return objectPart{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Once successfully uploaded, return completed part.
+ objPart := objectPart{}
+ objPart.Size = size
+ objPart.PartNumber = partNumber
+ // Trim the surrounding double quotes from the ETag.
+ objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
+ return objPart, nil
+}
+
+// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
+func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Marshal complete multipart body.
+ completeMultipartUploadBytes, err := xml.Marshal(complete)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Instantiate the complete multipart upload body buffer.
+ completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Bytes: sum256(completeMultipartUploadBytes),
+ }
+
+ // Execute POST to complete multipart upload for an objectName.
+ resp, err := c.executeMethod("POST", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Read resp.Body into a []byte to parse for an Error response inside the body.
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ // Decode completed multipart upload response on success.
+ completeMultipartUploadResult := completeMultipartUploadResult{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+ if err != nil {
+ // xml parsing failure due to the presence of an ill-formed xml fragment
+ return completeMultipartUploadResult, err
+ } else if completeMultipartUploadResult.Bucket == "" {
+ // xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+ // In this case, it would leave completeMultipartUploadResult with the corresponding zero values
+ // of its members.
+
+ // Decode completed multipart upload response on failure
+ completeMultipartUploadErr := ErrorResponse{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+ if err != nil {
+ // xml parsing failure due to the presence of an ill-formed xml fragment
+ return completeMultipartUploadResult, err
+ }
+ return completeMultipartUploadResult, completeMultipartUploadErr
+ }
+ return completeMultipartUploadResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go
new file mode 100644
index 000000000..0f79e708f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go
@@ -0,0 +1,108 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+ "strings"
+)
+
+// PutObjectWithProgress - Same as PutObject, but also updates the supplied progress reader as data is uploaded.
+func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+ if reader == nil {
+ return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
+ }
+
+ // Size of the object.
+ var size int64
+
+ // Get reader size.
+ size, err = getReaderSize(reader)
+ if err != nil {
+ return 0, err
+ }
+
+ // Check for largest object size allowed.
+ if size > int64(maxMultipartPutObjectSize) {
+ return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
+ }
+
+ // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
+ // So we fall back to single PUT operation with the maximum limit of 5GiB.
+ if isGoogleEndpoint(c.endpointURL) {
+ if size <= -1 {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+ }
+
+ // NOTE: S3 doesn't allow anonymous multipart requests.
+ if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if size <= -1 {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: "Content-Length cannot be negative for anonymous requests.",
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Do not compute MD5 for anonymous requests to Amazon
+ // S3. Uploads up to 5GiB in size.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+ }
+
+ // Small object upload: input data of size smaller than 5MiB is uploaded in a single PUT.
+ if size < minPartSize && size >= 0 {
+ return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+ }
+ // For all sizes greater than 5MiB do multipart.
+ n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // Verify if multipart functionality is not available, if not
+ // fall back to single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+ }
+ return n, err
+ }
+ return n, nil
+}
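+
+// A hedged usage sketch (editor's note, not part of the vendored source):
+// progress may be any io.Reader; the client reads from it as bytes are
+// uploaded, so a counting reader suffices. byteCounter is a hypothetical
+// helper, not a library type:
+//
+//	type byteCounter struct{ uploaded int64 }
+//
+//	func (b *byteCounter) Read(p []byte) (int, error) {
+//		b.uploaded += int64(len(p))
+//		return len(p), nil
+//	}
+//
+//	counter := &byteCounter{}
+//	n, err := client.PutObjectWithProgress("my-bucket", "my-object", dataReader, "application/octet-stream", counter)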
diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go
new file mode 100644
index 000000000..14fa4b296
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go
@@ -0,0 +1,246 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "sort"
+)
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+}
+
+// shouldUploadPartReadAt - verify if part should be uploaded.
+func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
+ // If the part is not found, the part should be uploaded.
+ uploadedPart, found := objectParts[objPart.PartNumber]
+ if !found {
+ return true
+ }
+ // If the size mismatches, the part should be uploaded.
+ if uploadedPart.Size != objPart.Size {
+ return true
+ }
+ return false
+}
+
+// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader
+// of type which implements io.ReaderAt interface (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows resuming multipart uploads by
+// reading at an offset, avoiding a re-read of data which was already
+// uploaded. Internally this function stages each part in a temporary
+// buffer before uploading it.
+func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+	// Get the upload ID for the object; a new multipart request is
+	// initiated if no previously partially uploaded object is found.
+ uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ if err != nil {
+ return 0, err
+ }
+
+	// Total data read and written to the server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // A map of all uploaded parts.
+ var partsInfo = make(map[int]objectPart)
+
+ // Fetch all parts info previously uploaded.
+ if !isNew {
+ partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Used for readability, lastPartNumber is always totalPartsCount.
+ lastPartNumber := totalPartsCount
+
+ // Declare a channel that sends the next part number to be uploaded.
+	// Buffered to 10000 because that's the maximum number of parts
+	// allowed by S3.
+ uploadPartsCh := make(chan int, 10000)
+
+ // Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts
+	// allowed by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+ // Send each part number to the channel to be processed.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- p
+ }
+ close(uploadPartsCh)
+
+ // Receive each part number from the channel allowing three parallel uploads.
+ for w := 1; w <= 3; w++ {
+ go func() {
+			// Reads default to a 5MiB buffer (optimalReadBufferSize).
+ readAtBuffer := make([]byte, optimalReadBufferSize)
+
+ // Each worker will draw from the part channel and upload in parallel.
+ for partNumber := range uploadPartsCh {
+ // Declare a new tmpBuffer.
+ tmpBuffer := new(bytes.Buffer)
+
+				// Part descriptor used to check whether this part was already uploaded.
+ verifyObjPart := objectPart{
+ PartNumber: partNumber,
+ Size: partSize,
+ }
+ // Special case if we see a last part number, save last part
+ // size as the proper part size.
+ if partNumber == lastPartNumber {
+ verifyObjPart.Size = lastPartSize
+ }
+
+				// Only upload the necessary parts. Otherwise return the size
+				// through the channel to update any progress bar.
+ if shouldUploadPartReadAt(verifyObjPart, partsInfo) {
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(partNumber-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if partNumber == lastPartNumber {
+ readOffset = (size - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
+
+					// Choose the hash algorithms to be calculated by hashCopyBuffer.
+					// Sha256 is avoided in non-v4 signature requests or HTTPS connections.
+ hashSums := make(map[string][]byte)
+ hashAlgos := make(map[string]hash.Hash)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ var prtSize int64
+ prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
+ if err != nil {
+ // Send the error back through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Proceed to upload the part.
+ var objPart objectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+ }
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: verifyObjPart.Size,
+ PartNum: partNumber,
+ Error: nil,
+ }
+ }
+ }()
+ }
+
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
+ }
+ // Retrieve each uploaded part and store it to be completed.
+ part, ok := partsInfo[uploadRes.PartNum]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+ }
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ // Update the progress bar if there is one.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
+ return totalUploadedSize, err
+ }
+ }
+ // Store the parts to be completed in order.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Verify if we uploaded all the data.
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
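+
+// An illustrative sketch (not part of the original source) of how a part
+// is carved out of an io.ReaderAt with io.NewSectionReader, as done above.
+// The file name and part geometry below are placeholder values:
+//
+//	f, err := os.Open("large-file.bin") // *os.File implements io.ReaderAt.
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	defer f.Close()
+//	partSize := int64(5 * 1024 * 1024)
+//	partNumber := int64(3)
+//	// Part 3 starts at offset (3-1)*partSize and spans partSize bytes.
+//	section := io.NewSectionReader(f, (partNumber-1)*partSize, partSize)
+//	_ = section // Upload the section as a single multipart part.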
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
new file mode 100644
index 000000000..f7dd2daf1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -0,0 +1,315 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+)
+
+// toInt - converts a Go value to its int64 representation based
+// on the value's kind, if it is an integer.
+func toInt(value reflect.Value) (size int64) {
+ size = -1
+ if value.IsValid() {
+ switch value.Kind() {
+ case reflect.Int:
+ fallthrough
+ case reflect.Int8:
+ fallthrough
+ case reflect.Int16:
+ fallthrough
+ case reflect.Int32:
+ fallthrough
+ case reflect.Int64:
+ size = value.Int()
+ }
+ }
+ return size
+}
+
+// getReaderSize - determine the size of the Reader if available.
+func getReaderSize(reader io.Reader) (size int64, err error) {
+ size = -1
+ if reader == nil {
+ return -1, nil
+ }
+ // Verify if there is a method by name 'Size'.
+ sizeFn := reflect.ValueOf(reader).MethodByName("Size")
+ // Verify if there is a method by name 'Len'.
+ lenFn := reflect.ValueOf(reader).MethodByName("Len")
+ if sizeFn.IsValid() {
+ if sizeFn.Kind() == reflect.Func {
+ // Call the 'Size' function and save its return value.
+ result := sizeFn.Call([]reflect.Value{})
+ if len(result) == 1 {
+ size = toInt(result[0])
+ }
+ }
+ } else if lenFn.IsValid() {
+ if lenFn.Kind() == reflect.Func {
+ // Call the 'Len' function and save its return value.
+ result := lenFn.Call([]reflect.Value{})
+ if len(result) == 1 {
+ size = toInt(result[0])
+ }
+ }
+ } else {
+		// Fall back to the Stat() method; two possible Stat() implementations exist.
+ switch v := reader.(type) {
+ case *os.File:
+ var st os.FileInfo
+ st, err = v.Stat()
+ if err != nil {
+				// Handle this case specially on Windows: for certain
+				// files such as 'Stdin', 'Stdout' and 'Stderr', fetching
+				// file information is not allowed.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(err.Error(), "GetFileInformationByHandle") {
+ return -1, nil
+ }
+ }
+ return
+ }
+			// If the input is a directory, return an error.
+ if st.Mode().IsDir() {
+ return -1, ErrInvalidArgument("Input file cannot be a directory.")
+ }
+			// Ignore 'Stdin', 'Stdout' and 'Stderr', since they are
+			// *os.File values that do not implement seeking internally;
+			// treat them like streams of unknown length.
+ switch st.Name() {
+ case "stdin":
+ fallthrough
+ case "stdout":
+ fallthrough
+ case "stderr":
+ return
+ }
+ size = st.Size()
+ case *Object:
+ var st ObjectInfo
+ st, err = v.Stat()
+ if err != nil {
+ return
+ }
+ size = st.Size
+ }
+ }
+	// Return the determined size.
+ return size, err
+}
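+
+// A minimal sketch (illustrative only) of what getReaderSize discovers via
+// reflection: bytes.Reader exposes Size(), *os.File is handled via Stat(),
+// and a plain stream reports -1 (unknown length). The file path below is a
+// placeholder:
+//
+//	n1, _ := getReaderSize(bytes.NewReader(make([]byte, 1024))) // n1 == 1024, via Size().
+//	f, _ := os.Open("some-file")
+//	n2, _ := getReaderSize(f) // n2 taken from f.Stat().Size().
+//	n3, _ := getReaderSize(io.LimitReader(f, 512)) // n3 == -1, no Size()/Len() method.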
+
+// completedParts is a collection of parts sortable by their part numbers.
+// Used for sorting the uploaded parts before completing the multipart request.
+type completedParts []completePart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
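+
+// Sketch of the intended use (illustrative only): parallel workers may
+// finish out of order, so parts are sorted by part number before the
+// multipart upload is completed:
+//
+//	parts := completedParts{{PartNumber: 3}, {PartNumber: 1}, {PartNumber: 2}}
+//	sort.Sort(parts) // Now ordered by PartNumber: 1, 2, 3.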
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
+// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
+// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
+// Maximum object size that can be uploaded through this operation will be 5TiB.
+//
+// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
+// So we fall back to single PUT operation with the maximum limit of 5GiB.
+//
+// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
+func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
+ return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
+}
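+
+// A minimal usage sketch for PutObject (illustrative; the endpoint,
+// credentials, bucket and file names below are placeholders):
+//
+//	client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	file, err := os.Open("backup.tar.gz")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	defer file.Close()
+//	// The size is detected from the *os.File; sizes above 5MiB go multipart.
+//	n, err := client.PutObject("my-bucket", "backup.tar.gz", file, "application/gzip")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Println("Uploaded", n, "bytes")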
+
+// putObjectNoChecksum - a special function used for Google Cloud Storage,
+// since Google's multipart API is not S3 compatible.
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+
+ // Update progress reader appropriately to the latest offset as we
+ // read from the source.
+ readSeeker := newHook(reader, progress)
+
+ // This function does not calculate sha256 and md5sum for payload.
+ // Execute put object.
+ st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
+ if err != nil {
+ return 0, err
+ }
+ if st.Size != size {
+ return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+ }
+ return size, nil
+}
+
+// putObjectSingle is a special function for uploading an object with a
+// single PUT request. It is used as a fallback when multipart upload fails.
+func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+	// If the size is unknown (a stream), upload up to 5GiB.
+ if size <= -1 {
+ size = maxSinglePutObjectSize
+ }
+
+	// Add the appropriate hash algorithms that need to be calculated by hashCopyN.
+	// In case of a non-v4 signature request or an HTTPS connection, sha256 is not needed.
+ hashAlgos := make(map[string]hash.Hash)
+ hashSums := make(map[string][]byte)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ if size <= minPartSize {
+ // Initialize a new temporary buffer.
+ tmpBuffer := new(bytes.Buffer)
+ size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size)
+ reader = bytes.NewReader(tmpBuffer.Bytes())
+ tmpBuffer.Reset()
+ } else {
+ // Initialize a new temporary file.
+ var tmpFile *tempFile
+ tmpFile, err = newTempFile("single$-putobject-single")
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
+ if err != nil {
+ return 0, err
+ }
+ // Seek back to beginning of the temporary file.
+ if _, err = tmpFile.Seek(0, 0); err != nil {
+ return 0, err
+ }
+ reader = tmpFile
+ }
+	// Return the error if it's not io.EOF.
+ if err != nil {
+ if err != io.EOF {
+ return 0, err
+ }
+ }
+ // Execute put object.
+ st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, contentType)
+ if err != nil {
+ return 0, err
+ }
+ if st.Size != size {
+ return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+ }
+ // Progress the reader to the size if putObjectDo is successful.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
+ return size, err
+ }
+ }
+ return size, nil
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+
+ if size <= -1 {
+ return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
+ }
+
+ if size > maxSinglePutObjectSize {
+ return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+
+ if strings.TrimSpace(contentType) == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // Set headers.
+ customHeader := make(http.Header)
+ customHeader.Set("Content-Type", contentType)
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Bytes: md5Sum,
+ contentSHA256Bytes: sha256Sum,
+ }
+
+	// Execute PUT on objectName.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ var metadata ObjectInfo
+	// Trim the surrounding double quotes from the ETag.
+ metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
+ // A success here means data was written to server successfully.
+ metadata.Size = size
+
+ // Return here.
+ return metadata, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
new file mode 100644
index 000000000..110a73e99
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -0,0 +1,273 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+)
+
+// RemoveBucket deletes the named bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before this request can succeed.
+func (c Client) RemoveBucket(bucketName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Execute DELETE on bucket.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Remove the location from cache on a successful delete.
+ c.bucketLocCache.Delete(bucketName)
+
+ return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ // DeleteObject always responds with http '204' even for
+ // objects which do not exist. So no need to handle them
+ // specifically.
+ return nil
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+ ObjectName string
+ Err error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML body for a Multi-Object Delete request.
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+ rmObjects := []deleteObject{}
+ for _, obj := range objects {
+ rmObjects = append(rmObjects, deleteObject{Key: obj})
+ }
+ xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+ return xmlBytes
+}
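+
+// For illustration, a request for objects "a.txt" and "b.txt" marshals to
+// (whitespace added here for readability only):
+//
+//	<Delete>
+//	  <Quiet>true</Quiet>
+//	  <Object><Key>a.txt</Key></Object>
+//	  <Object><Key>b.txt</Key></Object>
+//	</Delete>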
+
+// processRemoveMultiObjectsResponse - parse the Multi-Object Delete response
+// and send a RemoveObjectError for each object that failed to be deleted.
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+ // Parse multi delete XML response
+ rmResult := &deleteMultiObjectsResult{}
+ err := xmlDecoder(body, rmResult)
+ if err != nil {
+ errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+ return
+ }
+
+	// Send an error for each deletion that failed.
+ for _, obj := range rmResult.UnDeletedObjects {
+ errorCh <- RemoveObjectError{
+ ObjectName: obj.Key,
+ Err: ErrorResponse{
+ Code: obj.Code,
+ Message: obj.Message,
+ },
+ }
+ }
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove is received from objectsCh.
+// Remove failures are sent back via the error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+ errorCh := make(chan RemoveObjectError, 1)
+
+ // Validate if bucket name is valid.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: err,
+ }
+ return errorCh
+ }
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: ErrInvalidArgument("Objects channel cannot be nil"),
+ }
+ return errorCh
+ }
+
+ // Generate and call MultiDelete S3 requests based on entries received from objectsCh
+ go func(errorCh chan<- RemoveObjectError) {
+ maxEntries := 1000
+ finish := false
+ urlValues := make(url.Values)
+ urlValues.Set("delete", "")
+
+ // Close error channel when Multi delete finishes.
+ defer close(errorCh)
+
+		// Loop over entries in batches of 1000 and issue MultiDelete requests
+ for {
+ if finish {
+ break
+ }
+ count := 0
+ var batch []string
+
+ // Try to gather 1000 entries
+ for object := range objectsCh {
+ batch = append(batch, object)
+ if count++; count >= maxEntries {
+ break
+ }
+ }
+ if count < maxEntries {
+ // We didn't have 1000 entries, so this is the last batch
+ finish = true
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+			// Execute POST on bucket to delete the batch of objects.
+ resp, err := c.executeMethod("POST", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Bytes: sumMD5(removeBytes),
+ contentSHA256Bytes: sum256(removeBytes),
+ })
+ if err != nil {
+ for _, b := range batch {
+ errorCh <- RemoveObjectError{ObjectName: b, Err: err}
+ }
+ continue
+ }
+
+ // Process multiobjects remove xml response
+ processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+ closeResponse(resp)
+ }
+ }(errorCh)
+ return errorCh
+}
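+
+// A minimal usage sketch for RemoveObjects (illustrative; the client,
+// bucket and object names are placeholders):
+//
+//	objectsCh := make(chan string)
+//	go func() {
+//		defer close(objectsCh)
+//		for _, name := range []string{"logs/a.log", "logs/b.log"} {
+//			objectsCh <- name
+//		}
+//	}()
+//	for rErr := range client.RemoveObjects("my-bucket", objectsCh) {
+//		log.Println("failed to remove", rErr.ObjectName, "error:", rErr.Err)
+//	}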
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+// Requires explicit authentication; no anonymous requests are allowed for the multipart API.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Find multipart upload id of the object to be aborted.
+ uploadID, err := c.findUploadID(bucketName, objectName)
+ if err != nil {
+ return err
+ }
+ if uploadID != "" {
+ // Upload id found, abort the incomplete multipart upload.
+ err := c.abortMultipartUpload(bucketName, objectName, uploadID)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID; all previously uploaded parts are deleted.
+func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Execute DELETE on multipart upload.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ // Abort has no response body, handle it for any errors.
+ var errorResponse ErrorResponse
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+				// This is handled specifically for abort and cannot
+				// be folded into the default case.
+ errorResponse = ErrorResponse{
+ Code: "NoSuchUpload",
+ Message: "The specified multipart upload does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ return errorResponse
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
new file mode 100644
index 000000000..52e8a120d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -0,0 +1,243 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "time"
+)
+
+// listAllMyBucketsResult container for listBuckets response.
+type listAllMyBucketsResult struct {
+ // Container for one or more buckets.
+ Buckets struct {
+ Bucket []BucketInfo
+ }
+ Owner owner
+}
+
+// owner container for bucket owner information.
+type owner struct {
+ DisplayName string
+ ID string
+}
+
+// commonPrefix container for prefix response.
+type commonPrefix struct {
+ Prefix string
+}
+
+// listBucketV2Result container for listObjects V2 response.
+type listBucketV2Result struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []commonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ MaxKeys int64
+ Name string
+
+ // Hold the token that will be sent in the next request to fetch the next group of keys
+ NextContinuationToken string
+
+ ContinuationToken string
+ Prefix string
+
+ // FetchOwner and StartAfter are currently not used
+ FetchOwner string
+ StartAfter string
+}
+
+// listBucketResult container for listObjects response.
+type listBucketResult struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []commonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ Marker string
+ MaxKeys int64
+ Name string
+
+	// When the response is truncated (the IsTruncated element value in
+	// the response is true), you can use the key name in this field
+	// as the marker in the subsequent request to get the next set of
+	// objects. Object storage lists objects in alphabetical order.
+	// Note: this element is returned only if you have the delimiter
+	// request parameter specified. If the response does not include
+	// the NextMarker and it is truncated, you can use the value of
+	// the last Key in the response as the marker in the subsequent
+	// request to get the next set of object keys.
+ NextMarker string
+ Prefix string
+}
+
+// listMultipartUploadsResult container for ListMultipartUploads response
+type listMultipartUploadsResult struct {
+ Bucket string
+ KeyMarker string
+ UploadIDMarker string `xml:"UploadIdMarker"`
+ NextKeyMarker string
+ NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+ EncodingType string
+ MaxUploads int64
+ IsTruncated bool
+ Uploads []ObjectMultipartInfo `xml:"Upload"`
+ Prefix string
+ Delimiter string
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ CommonPrefixes []commonPrefix
+}
+
+// initiator container for who initiated multipart upload.
+type initiator struct {
+ ID string
+ DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+ ETag string
+ LastModified string // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// objectPart container for particular part of an object.
+type objectPart struct {
+ // Part number identifies the part.
+ PartNumber int
+
+ // Date and time the part was uploaded.
+ LastModified time.Time
+
+ // Entity tag returned when the part was uploaded, usually md5sum
+ // of the part.
+ ETag string
+
+ // Size of the uploaded part data.
+ Size int64
+}
+
+// listObjectPartsResult container for ListObjectParts response.
+type listObjectPartsResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+
+ Initiator initiator
+ Owner owner
+
+ StorageClass string
+ PartNumberMarker int
+ NextPartNumberMarker int
+ MaxParts int
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated bool
+ ObjectParts []objectPart `xml:"Part"`
+
+ EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+ Location string
+ Bucket string
+ Key string
+ ETag string
+}
+
+// completePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type completePart struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+ // Part number identifies the part.
+ PartNumber int
+ ETag string
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+ Parts []completePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+ Location string `xml:"LocationConstraint"`
+}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+ Key string
+ VersionId string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+ Key string
+ VersionId string `xml:"VersionId,omitempty"`
+ DeleteMarker bool
+ DeleteMarkerVersionId string
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+ Key string
+ Code string
+ Message string
+}
+
+// deleteMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool
+ Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []deletedObject `xml:"Deleted"`
+ UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
new file mode 100644
index 000000000..976d61241
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -0,0 +1,120 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// BucketExists verifies whether the bucket exists and you have permission to access it.
+func (c Client) BucketExists(bucketName string) (bool, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return false, err
+ }
+
+ // Execute HEAD on bucketName.
+ resp, err := c.executeMethod("HEAD", requestMetadata{
+ bucketName: bucketName,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ if ToErrorResponse(err).Code == "NoSuchBucket" {
+ return false, nil
+ }
+ return false, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return false, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return true, nil
+}
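+
+// Usage sketch (illustrative; the client and bucket name are placeholders):
+//
+//	found, err := client.BucketExists("my-bucket")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	if !found {
+//		log.Println("bucket does not exist")
+//	}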
+
+// StatObject verifies whether an object exists and you have permission to access it.
+func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Execute HEAD on objectName.
+ resp, err := c.executeMethod("HEAD", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+	// Trim the surrounding double quotes from the ETag.
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ md5sum = strings.TrimSuffix(md5sum, "\"")
+
+ // Parse content length.
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+	// Parse Last-Modified, which is in HTTP time format.
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Last-Modified time format is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ // Fetch content type if any present.
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ // Save object metadata info.
+ var objectStat ObjectInfo
+ objectStat.ETag = md5sum
+ objectStat.Key = objectName
+ objectStat.Size = size
+ objectStat.LastModified = date
+ objectStat.ContentType = contentType
+ return objectStat, nil
+}
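+
+// Usage sketch (illustrative; the client, bucket and object names are
+// placeholders):
+//
+//	info, err := client.StatObject("my-bucket", "my-object")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Println(info.Key, info.Size, info.ContentType, info.LastModified)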
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
new file mode 100644
index 000000000..954927084
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -0,0 +1,680 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+ /// Standard options.
+
+ // AccessKeyID required for authorized requests.
+ accessKeyID string
+ // SecretAccessKey required for authorized requests.
+ secretAccessKey string
+ // Choose a signature type if necessary.
+ signature SignatureType
+ // Set to 'true' if Client has no access and secret keys.
+ anonymous bool
+
+ // User supplied.
+ appInfo struct {
+ appName string
+ appVersion string
+ }
+ endpointURL string
+
+ // Indicate whether we are using https or not
+ secure bool
+
+ // Needs allocation.
+ httpClient *http.Client
+ bucketLocCache *bucketLocationCache
+
+ // Advanced functionality.
+ isTraceEnabled bool
+ traceOutput io.Writer
+
+ // Random seed.
+ random *rand.Rand
+}
+
+// Global constants.
+const (
+ libraryName = "minio-go"
+ libraryVersion = "2.0.2"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+// Minio (OS; ARCH) LIB/VER APP/VER
+const (
+ libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+ libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// NewV2 - instantiate minio client with Amazon S3 signature version
+// '2' compatibility.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+ if err != nil {
+ return nil, err
+ }
+ // Set to use signature version '2'.
+ clnt.signature = SignatureV2
+ return clnt, nil
+}
+
+// NewV4 - instantiate minio client with Amazon S3 signature version
+// '4' compatibility.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+ if err != nil {
+ return nil, err
+ }
+ // Set to use signature version '4'.
+ clnt.signature = SignatureV4
+ return clnt, nil
+}
+
+// New - instantiate a minio Client, with automatic selection
+// of the signature version.
+func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+ if err != nil {
+ return nil, err
+ }
+ // Google cloud storage should be set to signature V2, force it if
+ // not.
+ if isGoogleEndpoint(clnt.endpointURL) {
+ clnt.signature = SignatureV2
+ }
+	// If Amazon S3, set to signature v4.
+ if isAmazonEndpoint(clnt.endpointURL) {
+ clnt.signature = SignatureV4
+ }
+ return clnt, nil
+}
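+
+// A minimal construction sketch (illustrative; the credentials are
+// placeholders). New picks the right signature version for well-known
+// endpoints, while NewV2/NewV4 force a specific version for other
+// S3-compatible servers:
+//
+//	s3Client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	_ = s3Client // Signature v4 was selected automatically for Amazon S3.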
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an
+// int64.
+func (r *lockedRandSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ // construct endpoint.
+ endpointURL, err := getEndpointURL(endpoint, secure)
+ if err != nil {
+ return nil, err
+ }
+
+ // instantiate new Client.
+ clnt := new(Client)
+ clnt.accessKeyID = accessKeyID
+ clnt.secretAccessKey = secretAccessKey
+ if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
+ clnt.anonymous = true
+ }
+
+ // Remember whether we are using https or not
+ clnt.secure = secure
+
+ // Save endpoint URL, user agent for future uses.
+ clnt.endpointURL = endpointURL.String()
+
+ // Instantiate http client and bucket location cache.
+ clnt.httpClient = &http.Client{
+ Transport: http.DefaultTransport,
+ }
+
+	// Instantiate bucket location cache.
+ clnt.bucketLocCache = newBucketLocationCache()
+
+ // Introduce a new locked random seed.
+ clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
+ // Return.
+ return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+	// If the app name and version are not set, we do not set a new
+	// user agent.
+ if appName != "" && appVersion != "" {
+ c.appInfo = struct {
+ appName string
+ appVersion string
+ }{}
+ c.appInfo.appName = appName
+ c.appInfo.appVersion = appVersion
+ }
+}
+
+// SetCustomTransport - set new custom transport.
+func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+ // Set this to override default transport
+ // ``http.DefaultTransport``.
+ //
+ // This transport is usually needed for debugging OR to add your
+ // own custom TLS certificates on the client transport, for custom
+ // CA's and certs which are not part of standard certificate
+ // authority follow this example :-
+ //
+ // tr := &http.Transport{
+ // TLSClientConfig: &tls.Config{RootCAs: pool},
+ // DisableCompression: true,
+ // }
+ // api.SetTransport(tr)
+ //
+ if c.httpClient != nil {
+ c.httpClient.Transport = customHTTPTransport
+ }
+}
+
+// TraceOn - enable HTTP tracing.
+func (c *Client) TraceOn(outputStream io.Writer) {
+ // if outputStream is nil then default to os.Stdout.
+ if outputStream == nil {
+ outputStream = os.Stdout
+ }
+ // Sets a new output stream.
+ c.traceOutput = outputStream
+
+ // Enable tracing.
+ c.isTraceEnabled = true
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+ // Disable tracing.
+ c.isTraceEnabled = false
+}
+
+// requestMetadata - container for all the values needed to make a
+// request.
+type requestMetadata struct {
+ // If set newRequest presigns the URL.
+ presignURL bool
+
+ // User supplied.
+ bucketName string
+ objectName string
+ queryValues url.Values
+ customHeader http.Header
+ expires int64
+
+ // Generated by our internal code.
+ bucketLocation string
+ contentBody io.Reader
+ contentLength int64
+ contentSHA256Bytes []byte
+ contentMD5Bytes []byte
+}
+
+// Filter out signature value from Authorization header.
+func (c Client) filterSignature(req *http.Request) {
+ // For anonymous requests, no need to filter.
+ if c.anonymous {
+ return
+ }
+ // Handle if Signature V2.
+ if c.signature.isV2() {
+ // Set a temporary redacted auth
+ req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
+ return
+ }
+
+ /// Signature V4 authorization header.
+
+ // Save the original auth.
+ origAuth := req.Header.Get("Authorization")
+ // Strip out accessKeyID from:
+ // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+ regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
+ newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+ // Strip out 256-bit signature from: Signature=<256-bit signature>
+	regSign := regexp.MustCompile("Signature=([0-9a-f]+)")
+ newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+
+ // Set a temporary redacted auth
+ req.Header.Set("Authorization", newAuth)
+ return
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+ // Starts http dump.
+ _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Filter out Signature field from Authorization header.
+ c.filterSignature(req)
+
+ // Only display request header.
+ reqTrace, err := httputil.DumpRequestOut(req, false)
+ if err != nil {
+ return err
+ }
+
+ // Write request to trace output.
+ _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+ if err != nil {
+ return err
+ }
+
+ // Only display response header.
+ var respTrace []byte
+
+ // For errors we make sure to dump response body as well.
+ if resp.StatusCode != http.StatusOK &&
+ resp.StatusCode != http.StatusPartialContent &&
+ resp.StatusCode != http.StatusNoContent {
+ respTrace, err = httputil.DumpResponse(resp, true)
+ if err != nil {
+ return err
+ }
+ } else {
+ // WORKAROUND for https://github.com/golang/go/issues/13942.
+ // httputil.DumpResponse does not print response headers for
+ // all successful calls which have response ContentLength set
+ // to zero. Keep this workaround until the above bug is fixed.
+ if resp.ContentLength == 0 {
+ var buffer bytes.Buffer
+ if err = resp.Header.Write(&buffer); err != nil {
+ return err
+ }
+ respTrace = buffer.Bytes()
+ respTrace = append(respTrace, []byte("\r\n")...)
+ } else {
+ respTrace, err = httputil.DumpResponse(resp, false)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // Write response to trace output.
+ _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+ if err != nil {
+ return err
+ }
+
+ // Ends the http dump.
+ _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Returns success.
+ return nil
+}
+
+// do - execute http request.
+func (c Client) do(req *http.Request) (*http.Response, error) {
+ // do the request.
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang
+ // versions fix this issue properly.
+ urlErr, ok := err.(*url.Error)
+ if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ }
+ }
+ return nil, err
+ }
+
+	// Response cannot be nil; report an issue if that is the case.
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return nil, ErrInvalidArgument(msg)
+ }
+
+ // If trace is enabled, dump http request and response.
+ if c.isTraceEnabled {
+ err = c.dumpHTTP(req, resp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return resp, nil
+}
+
+// List of success status.
+var successStatus = []int{
+ http.StatusOK,
+ http.StatusNoContent,
+ http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error up to maxRetries attempts in a binomially
+// delayed manner using a standard back off algorithm.
+func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+ var isRetryable bool // Indicates if request can be retried.
+ var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+ if metadata.contentBody != nil {
+ // Check if body is seekable then it is retryable.
+ bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+ }
+
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+	// Blank identifier is kept here on purpose since 'range' without
+ // blank identifiers is only supported since go1.4
+ // https://golang.org/doc/go1.4#forrange.
+ for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) {
+		// Retry executes the following function body if the request
+		// has an error, until maxRetries have been exhausted. Retry
+		// attempts are performed after waiting for a given period of
+		// time in a binomial fashion.
+ if isRetryable {
+ // Seek back to beginning for each attempt.
+ if _, err = bodySeeker.Seek(0, 0); err != nil {
+ // If seek failed, no need to retry.
+ return nil, err
+ }
+ }
+
+ // Instantiate a new request.
+ var req *http.Request
+ req, err = c.newRequest(method, metadata)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+ return nil, err
+ }
+
+ // Initiate the request.
+ res, err = c.do(req)
+ if err != nil {
+ // For supported network errors verify.
+ if isNetErrorRetryable(err) {
+ continue // Retry.
+ }
+ // For other errors, return here no need to retry.
+ return nil, err
+ }
+
+ // For any known successful http status, return quickly.
+ for _, httpStatus := range successStatus {
+ if httpStatus == res.StatusCode {
+ return res, nil
+ }
+ }
+
+ // Read the body to be saved later.
+ errBodyBytes, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ // Save the body.
+ errBodySeeker := bytes.NewReader(errBodyBytes)
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+		// For errors, verify whether the response is retryable; otherwise fail quickly.
+ errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+		// If the bucket region is set in the error response, we can
+		// retry the request with the new region.
+ if errResponse.Region != "" {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+
+ // Verify if error response code is retryable.
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+
+ // Verify if http status code is retryable.
+ if isHTTPStatusRetryable(res.StatusCode) {
+ continue // Retry.
+ }
+
+ // Save the body back again.
+ errBodySeeker.Seek(0, 0) // Seek back to starting point.
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+ // For all other cases break out of the retry loop.
+ break
+ }
+ return res, err
+}
+
+// newRequest - instantiate a new HTTP request for a given method.
+func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
+ // If no method is supplied default to 'POST'.
+ if method == "" {
+ method = "POST"
+ }
+
+ // Default all requests to "us-east-1" or "cn-north-1" (china region)
+ location := "us-east-1"
+ if isAmazonChinaEndpoint(c.endpointURL) {
+ // For china specifically we need to set everything to
+ // cn-north-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // China region.
+ location = "cn-north-1"
+ }
+
+ // Gather location only if bucketName is present.
+ if metadata.bucketName != "" {
+ location, err = c.getBucketLocation(metadata.bucketName)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Save location.
+ metadata.bucketLocation = location
+
+ // Construct a new target URL.
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize a new HTTP request for the method.
+ req, err = http.NewRequest(method, targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Generate presign url if needed, return right here.
+ if metadata.expires != 0 && metadata.presignURL {
+ if c.anonymous {
+ return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
+ }
+ if c.signature.isV2() {
+ // Presign URL with signature v2.
+ req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
+ } else {
+ // Presign URL with signature v4.
+ req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+ }
+ return req, nil
+ }
+
+ // Set content body if available.
+ if metadata.contentBody != nil {
+ req.Body = ioutil.NopCloser(metadata.contentBody)
+ }
+
+	// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
+ // Skip setting 'expect' header for Google Cloud Storage, there
+ // are some known issues - https://github.com/restic/restic/issues/520
+ if !isGoogleEndpoint(c.endpointURL) {
+ // Set 'Expect' header for the request.
+ req.Header.Set("Expect", "100-continue")
+ }
+
+ // Set 'User-Agent' header for the request.
+ c.setUserAgent(req)
+
+ // Set all headers.
+ for k, v := range metadata.customHeader {
+ req.Header.Set(k, v[0])
+ }
+
+ // set incoming content-length.
+ if metadata.contentLength > 0 {
+ req.ContentLength = metadata.contentLength
+ }
+
+	// Set sha256 sum only for non-anonymous credentials.
+ if !c.anonymous {
+ // set sha256 sum for signature calculation only with
+ // signature version '4'.
+ if c.signature.isV4() {
+ shaHeader := unsignedPayload
+ if !c.secure {
+ if metadata.contentSHA256Bytes == nil {
+ shaHeader = hex.EncodeToString(sum256([]byte{}))
+ } else {
+ shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
+ }
+ }
+ req.Header.Set("X-Amz-Content-Sha256", shaHeader)
+ }
+ }
+
+ // set md5Sum for content protection.
+ if metadata.contentMD5Bytes != nil {
+ req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+ }
+
+ // Sign the request for all authenticated requests.
+ if !c.anonymous {
+ if c.signature.isV2() {
+ // Add signature version '2' authorization header.
+ req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ } else if c.signature.isV4() {
+ // Add signature version '4' authorization header.
+ req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
+ }
+ }
+
+ // Return request.
+ return req, nil
+}
+
+// set User agent.
+func (c Client) setUserAgent(req *http.Request) {
+ req.Header.Set("User-Agent", libraryUserAgent)
+ if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
+ req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
+ }
+}
+
+// makeTargetURL makes a new target URL.
+func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
+ // Save host.
+ url, err := url.Parse(c.endpointURL)
+ if err != nil {
+ return nil, err
+ }
+ host := url.Host
+ // For Amazon S3 endpoint, try to fetch location based endpoint.
+ if isAmazonEndpoint(c.endpointURL) {
+ // Fetch new host based on the bucket location.
+ host = getS3Endpoint(bucketLocation)
+ }
+ // Save scheme.
+ scheme := url.Scheme
+
+ urlStr := scheme + "://" + host + "/"
+ // Make URL only if bucketName is available, otherwise use the
+ // endpoint URL.
+ if bucketName != "" {
+		// Record whether the target URL uses buckets which support virtual host style.
+ isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
+
+ // If endpoint supports virtual host style use that always.
+ // Currently only S3 and Google Cloud Storage would support
+ // virtual host style.
+ if isVirtualHostStyle {
+ urlStr = scheme + "://" + bucketName + "." + host + "/"
+ if objectName != "" {
+ urlStr = urlStr + urlEncodePath(objectName)
+ }
+ } else {
+ // If not fall back to using path style.
+ urlStr = urlStr + bucketName + "/"
+ if objectName != "" {
+ urlStr = urlStr + urlEncodePath(objectName)
+ }
+ }
+ }
+ // If there are any query values, add them to the end.
+ if len(queryValues) > 0 {
+ urlStr = urlStr + "?" + queryEncode(queryValues)
+ }
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, err
+ }
+ return u, nil
+}
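+
+// For illustration, the two URL styles constructed above for bucket
+// "my-bucket" and object "a/b.txt" (the hostnames are examples):
+//
+//	Virtual-host style: https://my-bucket.s3.amazonaws.com/a/b.txt
+//	Path style:         https://myminio.example.com/my-bucket/a/b.txt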
diff --git a/vendor/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
new file mode 100644
index 000000000..2084ffef7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
@@ -0,0 +1,1293 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+// Tests bucket re-create errors.
+func TestMakeBucketErrorV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
+ t.Fatal("Error: make bucket should should fail for", bucketName)
+ }
+ // Verify valid error response from server.
+ if ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+ ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ t.Fatal("Error: Invalid error returned by server", err)
+ }
+ if err = c.RemoveBucket(bucketName); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+}
+
+// Test get object reader to not throw error on being closed twice.
+func TestGetObjectClosedTwiceV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data more than 32K.
+ buf := bytes.Repeat([]byte("h"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests removing partially uploaded objects.
+func TestRemovePartiallyUploadedV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping function tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, err = io.CopyN(writer, r, 128*1024)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ writer.CloseWithError(errors.New("Proactively closed to be verified later."))
+ }()
+
+ objectName := bucketName + "-resumable"
+ _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ if err == nil {
+ t.Fatal("Error: PutObject should fail.")
+ }
+ if err.Error() != "Proactively closed to be verified later." {
+ t.Fatal("Error:", err)
+ }
+ err = c.RemoveIncompleteUpload(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
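+
+// A note on the io.Pipe pattern above: the writer side is closed with a
+// sentinel error, which PutObject observes on its reader side, so the upload
+// fails deterministically partway through and leaves an incomplete multipart
+// upload behind for RemoveIncompleteUpload to clean up.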
+
+// Tests resumable put object cloud to cloud.
+func TestResumablePutObjectV2(t *testing.T) {
+ // Passing 'go test -short' skips these tests.
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Create a temporary file.
+ file, err := ioutil.TempFile(os.TempDir(), "resumable")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ // Copy 11MiB worth of data.
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // New object name.
+ objectName := bucketName + "-resumable"
+
+ // Upload the file.
+ n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Get the uploaded object.
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Upload now cloud to cloud.
+ n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Get object info.
+ objInfo, err := reader.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != objInfo.Size {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
+ }
+
+ // Remove all temp files, objects and bucket.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-put")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(file.Name())
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+}
+
+// Tests FPutObject hidden contentType setting
+func TestFPutObjectV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(file.Name(), fileName+".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Check headers
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-standard")
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rStandard.ContentType)
+ }
+
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-Octet")
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rStandard.ContentType)
+ }
+
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-GTar")
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/x-gtar", rStandard.ContentType)
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(fileName + ".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+}
+
+// Tests resumable file based put object multipart upload.
+func TestResumableFPutObjectV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ file, err := ioutil.TempFile(os.TempDir(), "resumable")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ objectName := bucketName + "-resumable"
+
+ n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Close the file pro-actively for windows.
+ file.Close()
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(file.Name())
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests bucket creation in various regions and supported name formats.
+func TestMakeBucketRegionsV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
+ t.Fatal("Error:", err, bucketName+".withperiod")
+ }
+
+ // Remove the newly created bucket.
+ if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
+ t.Fatal("Error:", err, bucketName+".withperiod")
+ }
+}
+
+// Tests get object ReadSeeker interface methods.
+func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data more than 32K.
+ buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data.
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
+ offset := int64(2048)
+ n, err = r.Seek(offset, 0)
+ if err != nil {
+ t.Fatal("Error:", err, offset)
+ }
+ if n != offset {
+ t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
+ offset, n)
+ }
+ n, err = r.Seek(0, 1)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != offset {
+ t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
+ offset, n)
+ }
+ _, err = r.Seek(offset, 2)
+ if err == nil {
+ t.Fatal("Error: seek on positive offset for whence '2' should error out")
+ }
+ n, err = r.Seek(-offset, 2)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != st.Size-offset {
+ t.Fatalf("Error: number of bytes seeked back does not match, want %d, got %v\n", st.Size-offset, n)
+ }
+
+ var buffer1 bytes.Buffer
+ if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+ if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+ t.Fatal("Error: Incorrect read bytes v/s original buffer.")
+ }
+
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != (offset - 1) {
+ t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+ // Verify the bytes read from the new offset.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ t.Fatal("Error: Incorrect read bytes v/s original buffer.")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
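+
+// A note on the whence values exercised above: 0, 1 and 2 seek relative to
+// the start, the current offset and the end of the object, matching the
+// semantics Go 1.7 names io.SeekStart, io.SeekCurrent and io.SeekEnd. A
+// minimal sketch (hypothetical n) for reading the last n bytes of an object:
+//
+//   if _, err := r.Seek(-n, 2); err != nil {
+//       // handle seek error
+//   }
+//   tail := make([]byte, n)
+//   if _, err := io.ReadFull(r, tail); err != nil {
+//       // handle read error
+//   }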
+
+// Tests get object ReaderAt interface methods.
+func TestGetObjectReadAtFunctionalV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data more than 32K
+ buf := bytes.Repeat([]byte("8"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
+ offset := int64(2048)
+
+ // Read directly
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ m, err := r.ReadAt(buf2, offset)
+ if err != nil {
+ t.Fatal("Error:", err, st.Size, len(buf2), offset)
+ }
+ if m != len(buf2) {
+ t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ t.Fatal("Error:", err, st.Size, len(buf3), offset)
+ }
+ if m != len(buf3) {
+ t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ t.Fatal("Error:", err, st.Size, len(buf4), offset)
+ }
+ if m != len(buf4) {
+ t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
+ }
+
+ buf5 := make([]byte, n)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err, len(buf5))
+ }
+ }
+ if m != len(buf5) {
+ t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
+ }
+ if !bytes.Equal(buf, buf5) {
+ t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
+ }
+
+ buf6 := make([]byte, n+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err, len(buf6))
+ }
+ }
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests copy object
+func TestCopyObjectV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping functional tests for short runs")
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy")
+ }
+
+ // Generate data more than 32K
+ buf := bytes.Repeat([]byte("9"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match want %v, got %v",
+ len(buf), n)
+ }
+
+ // Set copy conditions.
+ copyConds := NewCopyConditions()
+ err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Copy source.
+ copySource := bucketName + "/" + objectName
+
+ // Perform the Copy
+ err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+ }
+
+ // Source object
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := reader.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
+ objInfo.Size, objInfoCopy.Size)
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName + "-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests comprehensive list of all methods.
+func TestFunctionalV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ }
+ file.Close()
+
+ // Verify if bucket exists and you have access.
+ var exists bool
+ exists, err = c.BucketExists(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ if !exists {
+ t.Fatal("Error: could not find ", bucketName)
+ }
+
+ // Make the bucket 'public read/write'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // List all buckets.
+ buckets, err := c.ListBuckets()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if len(buckets) == 0 {
+ t.Fatal("Error: list buckets cannot be empty", buckets)
+ }
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ t.Fatal("Error: bucket ", bucketName, "not found")
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if n != int64(len(buf)) {
+ t.Fatal("Error: bad length ", n, len(buf))
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-nolength")
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ t.Fatal("Error: object " + objectName + " not found.")
+ }
+
+ incompObjNotFound := true
+ for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ t.Fatal("Error: unexpected dangling incomplete upload found.")
+ }
+
+ newReader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ t.Fatal("Error: bytes mismatch.")
+ }
+
+ err = c.FGetObject(bucketName, objectName, fileName+"-f")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ // Generate presigned GET object url.
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ // Verify if presigned url works.
+ resp, err := http.Get(presignedGetURL.String())
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal("Error: ", resp.Status)
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ t.Fatal("Error: bytes mismatch.")
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ // Generate presigned GET object url.
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal("Error: ", resp.Status)
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ t.Fatal("Error: bytes mismatch for presigned GET url.")
+ }
+ // Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+ }
+
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ // Generate data more than 32K
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
+
+ req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ httpClient := &http.Client{
+ // Setting a sensible timeout of 30 seconds to wait for response
+ // headers. The request is proactively canceled after 30 seconds
+ // if no response arrives.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ t.Fatal("Error: bytes mismatch.")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-f")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-nolength")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err == nil {
+ t.Fatal("Error:")
+ }
+ if err.Error() != "The specified bucket does not exist" {
+ t.Fatal("Error: ", err)
+ }
+ if err = os.Remove(fileName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ t.Fatal("Error: ", err)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
new file mode 100644
index 000000000..64f8a77f8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
@@ -0,0 +1,2044 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ crand "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+ b := make([]byte, n)
+ // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+ return prefix + string(b[0:30-len(prefix)])
+}
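+
+// How the bit caching above works: each src.Int63() yields 63 random bits,
+// consumed letterIdxBits (6) at a time, so one call covers up to letterIdxMax
+// (10) letters; indices >= len(letterBytes) are discarded, keeping the output
+// uniform over letterBytes. A typical call site, as used throughout these tests:
+//
+//   bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")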
+
+// Tests bucket re-create errors.
+func TestMakeBucketError(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'eu-central-1'.
+ if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ if err = c.MakeBucket(bucketName, "eu-central-1"); err == nil {
+ t.Fatal("Error: make bucket should should fail for", bucketName)
+ }
+ // Verify valid error response from server.
+ if ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+ ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ t.Fatal("Error: Invalid error returned by server", err)
+ }
+ if err = c.RemoveBucket(bucketName); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+}
+
+// Tests bucket creation in various regions and supported name formats.
+func TestMakeBucketRegions(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'eu-central-1'.
+ if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
+ t.Fatal("Error:", err, bucketName+".withperiod")
+ }
+
+ // Remove the newly created bucket.
+ if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
+ t.Fatal("Error:", err, bucketName+".withperiod")
+ }
+}
+
+// Tests PutObject using large data to trigger multipart readat.
+func TestPutObjectReadAt(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
+ buf := make([]byte, minPartSize*4)
+ // Use crand.Reader for multipart tests to ensure part order at the end.
+ size, err := io.ReadFull(crand.Reader, buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Test listing partially uploaded objects.
+func TestListPartiallyUploaded(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping function tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("0"), minPartSize*2))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, err = io.CopyN(writer, r, (minPartSize*2)/25)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ err := writer.CloseWithError(errors.New("Proactively closed to be verified later."))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ }()
+
+ objectName := bucketName + "-resumable"
+ _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ if err == nil {
+ t.Fatal("Error: PutObject should fail.")
+ }
+ if err.Error() != "Proactively closed to be verified later." {
+ t.Fatal("Error:", err)
+ }
+
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+ isRecursive := true
+ multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
+ for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ t.Fatalf("Error: Error when listing incomplete upload")
+ }
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Test get object seeker from the end, using whence set to '2'.
+func TestGetObjectSeekEnd(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data more than 32K
+ buf := bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
+ pos, err := r.Seek(-100, 2)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if pos != st.Size-100 {
+ t.Fatalf("Expected %d, got %d instead", pos, st.Size-100)
+ }
+ buf2 := make([]byte, 100)
+ m, err := io.ReadFull(r, buf2)
+ if err != nil {
+ t.Fatal("Error: reading through io.ReadFull", err, bucketName, objectName)
+ }
+ if m != len(buf2) {
+ t.Fatalf("Expected %d bytes, got %d", len(buf2), m)
+ }
+ hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+ hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+ if hexBuf1 != hexBuf2 {
+ t.Fatalf("Expected %s, got %s instead", hexBuf1, hexBuf2)
+ }
+ pos, err = r.Seek(-100, 2)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if pos != st.Size-100 {
+ t.Fatalf("Expected %d, got %d instead", pos, st.Size-100)
+ }
+ if err = r.Close(); err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+}
+
+// Test get object reader to not throw error on being closed twice.
+func TestGetObjectClosedTwice(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data more than 32K
+ buf := bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests removing multiple objects with the RemoveObjects API.
+func TestRemoveMultipleObjects(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping function tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ // Multi remove of 1100 objects
+ nrObjects := 1100
+
+ objectsCh := make(chan string)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ _, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error: PutObject shouldn't fail.")
+ }
+ objectsCh <- objectName
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := c.RemoveObjects(bucketName, objectsCh)
+
+ // Check if errorCh doesn't receive any error
+ if r, more := <-errorCh; more {
+ t.Fatalf("Unexpected error, objName(%v) err(%v)", r.ObjectName, r.Err)
+ }
+
+ // Clean the bucket created by the test
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
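+
+// Design note on the pattern above: RemoveObjects consumes object names from a
+// channel and streams any failures back on the returned error channel, so the
+// caller can pipeline creation and deletion. The names are sent using the S3
+// multi-object delete API, which accepts up to 1000 keys per request; the
+// 1100-object count here is enough to force more than one batch.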
+
+// Tests removing partially uploaded objects.
+func TestRemovePartiallyUploaded(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping function tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, err = io.CopyN(writer, r, 128*1024)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ err := writer.CloseWithError(errors.New("Proactively closed to be verified later."))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ }()
+
+ objectName := bucketName + "-resumable"
+ _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ if err == nil {
+ t.Fatal("Error: PutObject should fail.")
+ }
+ if err.Error() != "Proactively closed to be verified later." {
+ t.Fatal("Error:", err)
+ }
+ err = c.RemoveIncompleteUpload(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests resumable put object cloud to cloud.
+func TestResumablePutObject(t *testing.T) {
+ // Passing 'go test -short' skips these tests.
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Create a temporary file.
+ file, err := ioutil.TempFile(os.TempDir(), "resumable")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), minPartSize*2))
+ // Copy minPartSize*2 worth of data.
+ n, err := io.CopyN(file, r, minPartSize*2)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*2) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
+ }
+
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // New object name.
+ objectName := bucketName + "-resumable"
+
+ // Upload the file.
+ n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*2) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
+ }
+
+ // Get the uploaded object.
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Upload now cloud to cloud.
+ n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Get object info.
+ objInfo, err := reader.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != objInfo.Size {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
+ }
+
+ // Remove all temp files, objects and bucket.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-put")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(file.Name())
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests resumable file based put object multipart upload.
+func TestResumableFPutObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ file, err := ioutil.TempFile(os.TempDir(), "resumable")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Upload 4 parts to use all 3 multipart 'workers' and have an extra part.
+ buffer := make([]byte, minPartSize*4)
+ // Use crand.Reader for multipart tests to ensure parts are uploaded in correct order.
+ size, err := io.ReadFull(crand.Reader, buffer)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+ size, err = file.Write(buffer)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ objectName := bucketName + "-resumable"
+
+ n, err := c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*4) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(file.Name())
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests FPutObject with a big file to trigger a multipart upload.
+func TestFPutObjectMultipart(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a temp file with minPartSize*4 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ buffer := make([]byte, minPartSize*4)
+
+ size, err := io.ReadFull(crand.Reader, buffer)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+ size, err = file.Write(buffer)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*4) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests FPutObject's automatic contentType detection.
+func TestFPutObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a temp file with minPartSize*4 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+	// Upload 4 parts' worth of data to use all 3 multipart 'workers' and still have an extra part.
+ buffer := make([]byte, minPartSize*4)
+	// Use random data for multipart tests to verify that parts are uploaded in the correct order.
+ size, err := io.ReadFull(crand.Reader, buffer)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+
+ // Write the data to the file.
+ size, err = file.Write(buffer)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ }
+
+	// Close the file proactively for Windows.
+ err = file.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*4) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*4) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(file.Name(), fileName+".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(minPartSize*4) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+
+ // Check headers
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-standard")
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rStandard.ContentType)
+ }
+
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-Octet")
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+			"application/octet-stream", rOctet.ContentType)
+ }
+
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-GTar")
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+			"application/x-gtar", rGTar.ContentType)
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(fileName + ".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+}
+
+// Tests get object ReadSeeker interface methods.
+func TestGetObjectReadSeekFunctional(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+	// Generate more than 32K of data.
+ buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
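+	// The whence values below follow io.Seeker semantics: 0 seeks from
+	// the start, 1 from the current position, and 2 from the end.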
+ offset := int64(2048)
+ n, err = r.Seek(offset, 0)
+ if err != nil {
+ t.Fatal("Error:", err, offset)
+ }
+ if n != offset {
+		t.Fatalf("Error: seek position does not match, want %v, got %v\n",
+ offset, n)
+ }
+ n, err = r.Seek(0, 1)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != offset {
+		t.Fatalf("Error: current seek position does not match, want %v, got %v\n",
+ offset, n)
+ }
+ _, err = r.Seek(offset, 2)
+ if err == nil {
+ t.Fatal("Error: seek on positive offset for whence '2' should error out")
+ }
+ n, err = r.Seek(-offset, 2)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != st.Size-offset {
+		t.Fatalf("Error: seek position from end does not match, want %d, got %v\n", st.Size-offset, n)
+ }
+
+ var buffer1 bytes.Buffer
+ if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+ if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+		t.Fatal("Error: read bytes do not match the original buffer.")
+ }
+
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != (offset - 1) {
+		t.Fatalf("Error: seek position does not match, want %v, got %v\n", offset-1, n)
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+	// Verify the bytes read from offset 2047 onwards.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+		t.Fatal("Error: read bytes do not match the original buffer.")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests get object ReaderAt interface methods.
+func TestGetObjectReadAtFunctional(t *testing.T) {
+ if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+	// Generate more than 32K of data.
+ buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ t.Fatal("Error:", err, len(buf1), offset)
+ }
+ if m != len(buf1) {
+		t.Fatalf("Error: ReadAt read fewer bytes than expected before reaching EOF, want %v, got %v\n", len(buf1), m)
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		t.Fatal("Error: ReadAt data does not match the original buffer at this offset.")
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ t.Fatal("Error:", err, st.Size, len(buf2), offset)
+ }
+ if m != len(buf2) {
+		t.Fatalf("Error: ReadAt read fewer bytes than expected before reaching EOF, want %v, got %v\n", len(buf2), m)
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		t.Fatal("Error: ReadAt data does not match the original buffer at this offset.")
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ t.Fatal("Error:", err, st.Size, len(buf3), offset)
+ }
+ if m != len(buf3) {
+		t.Fatalf("Error: ReadAt read fewer bytes than expected before reaching EOF, want %v, got %v\n", len(buf3), m)
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		t.Fatal("Error: ReadAt data does not match the original buffer at this offset.")
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ t.Fatal("Error:", err, st.Size, len(buf4), offset)
+ }
+ if m != len(buf4) {
+		t.Fatalf("Error: ReadAt read fewer bytes than expected before reaching EOF, want %v, got %v\n", len(buf4), m)
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		t.Fatal("Error: ReadAt data does not match the original buffer at this offset.")
+ }
+
+ buf5 := make([]byte, n)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err, len(buf5))
+ }
+ }
+ if m != len(buf5) {
+		t.Fatalf("Error: ReadAt read fewer bytes than expected before reaching EOF, want %v, got %v\n", len(buf5), m)
+ }
+ if !bytes.Equal(buf, buf5) {
+		t.Fatal("Error: data read in GetObject does not match what was previously uploaded.")
+ }
+
+ buf6 := make([]byte, n+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err, len(buf6))
+ }
+ }
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Test Presigned Post Policy
+func TestPresignedPostPolicy(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping functional tests for short runs")
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := NewV4(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket in 'us-east-1'.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+	// Generate more than 32K of data.
+ buf := bytes.Repeat([]byte("4"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match want %v, got %v",
+ len(buf), n)
+ }
+
+ policy := NewPostPolicy()
+
+	if err := policy.SetBucket(""); err == nil {
+		t.Fatal("Error: SetBucket with an empty bucket name should fail")
+	}
+	if err := policy.SetKey(""); err == nil {
+		t.Fatal("Error: SetKey with an empty object name should fail")
+	}
+	if err := policy.SetKeyStartsWith(""); err == nil {
+		t.Fatal("Error: SetKeyStartsWith with an empty prefix should fail")
+	}
+	if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+		t.Fatal("Error: SetExpires with an already expired date should fail")
+	}
+	if err := policy.SetContentType(""); err == nil {
+		t.Fatal("Error: SetContentType with an empty content type should fail")
+	}
+	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+		t.Fatal("Error: SetContentLengthRange with min larger than max should fail")
+	}
+
+ policy.SetBucket(bucketName)
+ policy.SetKey(objectName)
+ policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+ policy.SetContentType("image/png")
+ policy.SetContentLengthRange(1024, 1024*1024)
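+	// Together these conditions restrict the presigned POST to a single
+	// image/png object between 1 KiB and 1 MiB in size.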
+
+ _, _, err = c.PresignedPostPolicy(policy)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ policy = NewPostPolicy()
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Tests copy object
+func TestCopyObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping functional tests for short runs")
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := NewV4(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy")
+ }
+
+	// Generate more than 32K of data.
+ buf := bytes.Repeat([]byte("5"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match want %v, got %v",
+ len(buf), n)
+ }
+
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set copy conditions.
+ copyConds := NewCopyConditions()
+
+ // Start by setting wrong conditions
+	err = copyConds.SetModified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		t.Fatal("Error: SetModified with a zero-value time should fail")
+	}
+	err = copyConds.SetUnmodified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		t.Fatal("Error: SetUnmodified with a zero-value time should fail")
+	}
+	err = copyConds.SetMatchETag("")
+	if err == nil {
+		t.Fatal("Error: SetMatchETag with an empty ETag should fail")
+	}
+	err = copyConds.SetMatchETagExcept("")
+	if err == nil {
+		t.Fatal("Error: SetMatchETagExcept with an empty ETag should fail")
+	}
+
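+	// Now set conditions that the source object actually satisfies.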
+ err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ err = copyConds.SetMatchETag(objInfo.ETag)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Copy source.
+ copySource := bucketName + "/" + objectName
+
+ // Perform the Copy
+ err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+ }
+
+ // Source object
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = reader.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
+ objInfo.Size, objInfoCopy.Size)
+ }
+
+ // CopyObject again but with wrong conditions
+ copyConds = NewCopyConditions()
+ err = copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ err = copyConds.SetMatchETagExcept(objInfo.ETag)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Perform the Copy which should fail
+ err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ if err == nil {
+		t.Fatal("Error: CopyObject with unmatched conditions should have failed for", bucketName+"-copy", objectName+"-copy")
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName + "-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
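+// Tests set, get and removal of bucket notification configurations.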
+func TestBucketNotification(t *testing.T) {
+ if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ bucketName := os.Getenv("NOTIFY_BUCKET")
+
+ topicArn := NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
+ queueArn := NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
+
+ topicConfig := NewNotificationConfig(topicArn)
+ topicConfig.AddEvents(ObjectCreatedAll, ObjectRemovedAll)
+ topicConfig.AddFilterSuffix("jpg")
+
+ queueConfig := NewNotificationConfig(queueArn)
+ queueConfig.AddEvents(ObjectCreatedAll)
+ queueConfig.AddFilterPrefix("photos/")
+
+ bNotification := BucketNotification{}
+ bNotification.AddTopic(topicConfig)
+
+ // Add the same topicConfig again, should have no effect
+ // because it is duplicated
+ bNotification.AddTopic(topicConfig)
+ if len(bNotification.TopicConfigs) != 1 {
+ t.Fatal("Error: duplicated entry added")
+ }
+
+ // Add and remove a queue config
+ bNotification.AddQueue(queueConfig)
+ bNotification.RemoveQueueByArn(queueArn)
+
+ err = c.SetBucketNotification(bucketName, bNotification)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ bNotification, err = c.GetBucketNotification(bucketName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if len(bNotification.TopicConfigs) != 1 {
+ t.Fatal("Error: Topic config is empty")
+ }
+
+ if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
+ t.Fatal("Error: cannot get the suffix")
+ }
+
+ err = c.RemoveAllBucketNotification(bucketName)
+ if err != nil {
+ t.Fatal("Error: cannot delete bucket notification")
+ }
+}
+
+// Tests comprehensive list of all methods.
+func TestFunctional(t *testing.T) {
+ if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := New(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ true,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ }
+ file.Close()
+
+	// Verify the bucket exists and you have access.
+ var exists bool
+ exists, err = c.BucketExists(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ if !exists {
+ t.Fatal("Error: could not find ", bucketName)
+ }
+
+ // Asserting the default bucket policy.
+ policyAccess, err := c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policyAccess != "none" {
+ t.Fatalf("Default bucket policy incorrect")
+ }
+ // Set the bucket policy to 'public readonly'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // should return policy `readonly`.
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policyAccess != "readonly" {
+ t.Fatalf("Expected bucket policy to be readonly")
+ }
+
+ // Make the bucket 'public writeonly'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // should return policy `writeonly`.
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policyAccess != "writeonly" {
+ t.Fatalf("Expected bucket policy to be writeonly")
+ }
+ // Make the bucket 'public read/write'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // should return policy `readwrite`.
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policyAccess != "readwrite" {
+ t.Fatalf("Expected bucket policy to be readwrite")
+ }
+ // List all buckets.
+ buckets, err := c.ListBuckets()
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+	if len(buckets) == 0 {
+		t.Fatal("Error: list buckets cannot be empty", buckets)
+	}
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ t.Fatal("Error: bucket ", bucketName, "not found")
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("f"), 1<<19)
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if n != int64(len(buf)) {
+ t.Fatal("Error: bad length ", n, len(buf))
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-nolength")
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
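+	// Closing doneCh signals the goroutines spawned by the List* calls
+	// below to stop streaming results.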
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ t.Fatal("Error: object " + objectName + " not found.")
+ }
+
+ objFound = false
+ isRecursive = true // Recursive is true.
+ for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ t.Fatal("Error: object " + objectName + " not found.")
+ }
+
+ incompObjNotFound := true
+ for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ t.Fatal("Error: unexpected dangling incomplete upload found.")
+ }
+
+ newReader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ t.Fatal("Error: bytes mismatch.")
+ }
+
+ err = c.FGetObject(bucketName, objectName, fileName+"-f")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ // Generate presigned GET object url.
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ // Verify if presigned url works.
+ resp, err := http.Get(presignedGetURL.String())
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal("Error: ", resp.Status)
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ t.Fatal("Error: bytes mismatch.")
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal("Error: ", resp.Status)
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ t.Fatal("Error: bytes mismatch for presigned GET URL.")
+ }
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+ }
+
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ buf = bytes.Repeat([]byte("g"), 1<<19)
+
+ req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ httpClient := &http.Client{
+		// Set a sensible timeout of 30 seconds to wait for response
+		// headers. The request is proactively canceled after 30 seconds
+		// with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ t.Fatal("Error: bytes mismatch.")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-f")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-nolength")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err == nil {
+		t.Fatal("Error: removing an already removed bucket should fail")
+ }
+ if err.Error() != "The specified bucket does not exist" {
+ t.Fatal("Error: ", err)
+ }
+ if err = os.Remove(fileName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ t.Fatal("Error: ", err)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go
new file mode 100644
index 000000000..817a8c2c7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api_unit_test.go
@@ -0,0 +1,394 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/minio/minio-go/pkg/policy"
+)
+
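+// customReader implements io.Reader and additionally exposes a Size()
+// method, letting getReaderSize() discover its length without reading it.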
+type customReader struct{}
+
+func (c *customReader) Read(p []byte) (n int, err error) {
+ return 0, nil
+}
+
+func (c *customReader) Size() (n int64) {
+ return 10
+}
+
+// Tests getReaderSize() for various Reader types.
+func TestGetReaderSize(t *testing.T) {
+ var reader io.Reader
+ size, err := getReaderSize(reader)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != -1 {
+ t.Fatal("Reader shouldn't have any length.")
+ }
+
+ bytesReader := bytes.NewReader([]byte("Hello World"))
+ size, err = getReaderSize(bytesReader)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != int64(len("Hello World")) {
+ t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
+ }
+
+ size, err = getReaderSize(new(customReader))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != int64(10) {
+ t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
+ }
+
+ stringsReader := strings.NewReader("Hello World")
+ size, err = getReaderSize(stringsReader)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != int64(len("Hello World")) {
+ t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
+ }
+
+ // Create request channel.
+ reqCh := make(chan getRequest, 1)
+ // Create response channel.
+ resCh := make(chan getResponse, 1)
+ // Create done channel.
+ doneCh := make(chan struct{})
+
+ objectInfo := ObjectInfo{Size: 10}
+ // Create the first request.
+ firstReq := getRequest{
+ isReadOp: false, // Perform only a HEAD object to get objectInfo.
+ isFirstReq: true,
+ }
+ // Create the expected response.
+ firstRes := getResponse{
+ objectInfo: objectInfo,
+ }
+ // Send the expected response.
+ resCh <- firstRes
+
+ // Test setting size on the first request.
+ objectReaderFirstReq := newObject(reqCh, resCh, doneCh)
+ defer objectReaderFirstReq.Close()
+ // Not checking the response here...just that the reader size is correct.
+ _, err = objectReaderFirstReq.doGetRequest(firstReq)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Validate that the reader size is the objectInfo size.
+ size, err = getReaderSize(objectReaderFirstReq)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != int64(10) {
+ t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size)
+ }
+
+ fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ defer fileReader.Close()
+ defer os.RemoveAll(fileReader.Name())
+
+ size, err = getReaderSize(fileReader)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size == -1 {
+ t.Fatal("Reader length for file cannot be -1.")
+ }
+
+ // Verify for standard input, output and error file descriptors.
+ size, err = getReaderSize(os.Stdin)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != -1 {
+ t.Fatal("Stdin should have length of -1.")
+ }
+ size, err = getReaderSize(os.Stdout)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != -1 {
+ t.Fatal("Stdout should have length of -1.")
+ }
+ size, err = getReaderSize(os.Stderr)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != -1 {
+ t.Fatal("Stderr should have length of -1.")
+ }
+ file, err := os.Open(os.TempDir())
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ defer file.Close()
+ _, err = getReaderSize(file)
+ if err == nil {
+		t.Fatal("Error: passing a directory as input should return an error.")
+ }
+}
+
+// Tests valid hosts for location.
+func TestValidBucketLocation(t *testing.T) {
+ s3Hosts := []struct {
+ bucketLocation string
+ endpoint string
+ }{
+ {"us-east-1", "s3.amazonaws.com"},
+ {"unknown", "s3.amazonaws.com"},
+ {"ap-southeast-1", "s3-ap-southeast-1.amazonaws.com"},
+ }
+ for _, s3Host := range s3Hosts {
+ endpoint := getS3Endpoint(s3Host.bucketLocation)
+ if endpoint != s3Host.endpoint {
+ t.Fatal("Error: invalid bucket location", endpoint)
+ }
+ }
+}
+
+// Tests temp file.
+func TestTempFile(t *testing.T) {
+ tmpFile, err := newTempFile("testing")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ fileName := tmpFile.Name()
+ // Closing temporary file purges the file.
+ err = tmpFile.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ st, err := os.Stat(fileName)
+ if err != nil && !os.IsNotExist(err) {
+ t.Fatal("Error:", err)
+ }
+ if err == nil && st != nil {
+ t.Fatal("Error: file should be deleted and should not exist.")
+ }
+}
+
+// Tests url encoding.
+func TestEncodeURL2Path(t *testing.T) {
+ type urlStrings struct {
+ objName string
+ encodedObjName string
+ }
+
+ bucketName := "bucketName"
+ want := []urlStrings{
+ {
+ objName: "本語",
+ encodedObjName: "%E6%9C%AC%E8%AA%9E",
+ },
+ {
+ objName: "本語.1",
+ encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
+ },
+ {
+ objName: ">123>3123123",
+ encodedObjName: "%3E123%3E3123123",
+ },
+ {
+ objName: "test 1 2.txt",
+ encodedObjName: "test%201%202.txt",
+ },
+ {
+ objName: "test++ 1.txt",
+ encodedObjName: "test%2B%2B%201.txt",
+ },
+ }
+
+ for _, o := range want {
+ u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ urlPath := "/" + bucketName + "/" + o.encodedObjName
+ if urlPath != encodeURL2Path(u) {
+ t.Fatal("Error")
+ }
+ }
+}
+
+// Tests error response structure.
+func TestErrorResponse(t *testing.T) {
+ var err error
+ err = ErrorResponse{
+ Code: "Testing",
+ }
+ errResp := ToErrorResponse(err)
+ if errResp.Code != "Testing" {
+ t.Fatal("Type conversion failed, we have an empty struct.")
+ }
+
+ // Test http response decoding.
+ var httpResponse *http.Response
+ // Set empty variables
+ httpResponse = nil
+ var bucketName, objectName string
+
+ // Should fail with invalid argument.
+ err = httpRespToErrorResponse(httpResponse, bucketName, objectName)
+ errResp = ToErrorResponse(err)
+ if errResp.Code != "InvalidArgument" {
+ t.Fatal("Empty response input should return invalid argument.")
+ }
+}
+
+// Tests signature calculation.
+func TestSignatureCalculation(t *testing.T) {
+ req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ req = signV4(*req, "", "", "us-east-1")
+ if req.Header.Get("Authorization") != "" {
+ t.Fatal("Error: anonymous credentials should not have Authorization header.")
+ }
+
+ req = preSignV4(*req, "", "", "us-east-1", 0)
+ if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
+ t.Fatal("Error: anonymous credentials should not have Signature query resource.")
+ }
+
+ req = signV2(*req, "", "")
+ if req.Header.Get("Authorization") != "" {
+ t.Fatal("Error: anonymous credentials should not have Authorization header.")
+ }
+
+ req = preSignV2(*req, "", "", 0)
+ if strings.Contains(req.URL.RawQuery, "Signature") {
+ t.Fatal("Error: anonymous credentials should not have Signature query resource.")
+ }
+
+ req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
+ if req.Header.Get("Authorization") == "" {
+ t.Fatal("Error: normal credentials should have Authorization header.")
+ }
+
+ req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
+ if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
+ t.Fatal("Error: normal credentials should have Signature query resource.")
+ }
+
+ req = signV2(*req, "ACCESS-KEY", "SECRET-KEY")
+ if req.Header.Get("Authorization") == "" {
+ t.Fatal("Error: normal credentials should have Authorization header.")
+ }
+
+ req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
+ if !strings.Contains(req.URL.RawQuery, "Signature") {
+		t.Fatal("Error: normal credentials should have Signature query resource.")
+ }
+}
+
+// Tests signature type.
+func TestSignatureType(t *testing.T) {
+ clnt := Client{}
+ if !clnt.signature.isV4() {
+ t.Fatal("Error")
+ }
+ clnt.signature = SignatureV2
+ if !clnt.signature.isV2() {
+ t.Fatal("Error")
+ }
+ if clnt.signature.isV4() {
+ t.Fatal("Error")
+ }
+ clnt.signature = SignatureV4
+ if !clnt.signature.isV4() {
+ t.Fatal("Error")
+ }
+}
+
+// Tests bucket policy types.
+func TestBucketPolicyTypes(t *testing.T) {
+ want := map[string]bool{
+ "none": true,
+ "readonly": true,
+ "writeonly": true,
+ "readwrite": true,
+ "invalid": false,
+ }
+ for bucketPolicy, ok := range want {
+ if policy.BucketPolicy(bucketPolicy).IsValidBucketPolicy() != ok {
+ t.Fatal("Error")
+ }
+ }
+}
+
+// Tests optimal part size.
+func TestPartSize(t *testing.T) {
+ _, _, _, err := optimalPartInfo(5000000000000000000)
+ if err == nil {
+ t.Fatal("Error: should fail")
+ }
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5497558138880)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
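+	// 5497558138880 bytes (5 TiB) splits into 9102 full parts of
+	// 603979776 bytes (576 MiB) plus a last part of 134217728 bytes
+	// (128 MiB), i.e. 9103 parts in total.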
+	if totalPartsCount != 9103 {
+		t.Fatalf("Error: expecting total parts count of 9103: got %v instead", totalPartsCount)
+	}
+	if partSize != 603979776 {
+		t.Fatalf("Error: expecting part size of 603979776: got %v instead", partSize)
+	}
+	if lastPartSize != 134217728 {
+		t.Fatalf("Error: expecting last part size of 134217728: got %v instead", lastPartSize)
+	}
+ _, partSize, _, err = optimalPartInfo(5000000000)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if partSize != minPartSize {
+ t.Fatalf("Error: expecting part size of %v: got %v instead", minPartSize, partSize)
+ }
+ totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+	if totalPartsCount != 9103 {
+		t.Fatalf("Error: expecting total parts count of 9103: got %v instead", totalPartsCount)
+	}
+	if partSize != 603979776 {
+		t.Fatalf("Error: expecting part size of 603979776: got %v instead", partSize)
+	}
+	if lastPartSize != 134217728 {
+		t.Fatalf("Error: expecting last part size of 134217728: got %v instead", lastPartSize)
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
new file mode 100644
index 000000000..a5dc2b226
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -0,0 +1,37 @@
+# version format
+version: "{build}"
+
+# Operating system (build VM template)
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\minio\minio-go
+
+# environment variables
+environment:
+ GOPATH: c:\gopath
+ GO15VENDOREXPERIMENT: 1
+
+# scripts that run after cloning repository
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+ - go env
+ - go get -u github.com/golang/lint/golint
+ - go get -u github.com/remyoudompheng/go-misc/deadcode
+ - go get -u github.com/gordonklaus/ineffassign
+
+# to run your custom scripts instead of automatic MSBuild
+build_script:
+ - go vet ./...
+ - gofmt -s -l .
+ - golint github.com/minio/minio-go...
+ - deadcode
+ - ineffassign .
+ - go test -short -v
+ - go test -short -race -v
+
+# to disable automatic tests
+test: off
+
+# to disable deployment
+deploy: off
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
new file mode 100644
index 000000000..4ad106959
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -0,0 +1,197 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/hex"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+)
+
+// bucketLocationCache - Provides a simple mechanism to hold bucket
+// locations in memory.
+type bucketLocationCache struct {
+ // mutex is used for handling the concurrent
+ // read/write requests for cache.
+ sync.RWMutex
+
+ // items holds the cached bucket locations.
+ items map[string]string
+}
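+
+// Get, Set and Delete below are safe for concurrent use via the
+// embedded RWMutex.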
+
+// newBucketLocationCache - Provides a new bucket location cache to be
+// used internally with the client object.
+func newBucketLocationCache() *bucketLocationCache {
+ return &bucketLocationCache{
+ items: make(map[string]string),
+ }
+}
+
+// Get - Returns a value of a given key if it exists.
+func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
+ r.RLock()
+ defer r.RUnlock()
+ location, ok = r.items[bucketName]
+ return
+}
+
+// Set - Will persist a value into cache.
+func (r *bucketLocationCache) Set(bucketName string, location string) {
+ r.Lock()
+ defer r.Unlock()
+ r.items[bucketName] = location
+}
+
+// Delete - Deletes a bucket name from cache.
+func (r *bucketLocationCache) Delete(bucketName string) {
+ r.Lock()
+ defer r.Unlock()
+ delete(r.items, bucketName)
+}
+
+// GetBucketLocation - get location for the bucket name from location cache, if not
+// fetch freshly by making a new request.
+func (c Client) GetBucketLocation(bucketName string) (string, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ return c.getBucketLocation(bucketName)
+}
+
+// getBucketLocation - Get location for the bucketName from location map cache, if not
+// fetch freshly by making a new request.
+func (c Client) getBucketLocation(bucketName string) (string, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ return location, nil
+ }
+
+ if isAmazonChinaEndpoint(c.endpointURL) {
+ // For china specifically we need to set everything to
+ // cn-north-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // China region.
+ return "cn-north-1", nil
+ }
+
+ // Initialize a new request.
+ req, err := c.getBucketLocationRequest(bucketName)
+ if err != nil {
+ return "", err
+ }
+
+ // Initiate the request.
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+ location, err := processBucketLocationResponse(resp, bucketName)
+ if err != nil {
+ return "", err
+ }
+ c.bucketLocCache.Set(bucketName, location)
+ return location, nil
+}
+
+// processes the getBucketLocation http response from the server.
+func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ err = httpRespToErrorResponse(resp, bucketName, "")
+ errResp := ToErrorResponse(err)
+ // For access denied error, it could be an anonymous
+ // request. Move forward and let the top level callers
+ // succeed if possible based on their policy.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ return "us-east-1", nil
+ }
+ return "", err
+ }
+ }
+
+ // Extract location.
+ var locationConstraint string
+ err = xmlDecoder(resp.Body, &locationConstraint)
+ if err != nil {
+ return "", err
+ }
+
+ location := locationConstraint
+ // Location is empty will be 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ }
+
+ // Location can be 'EU' convert it to meaningful 'eu-west-1'.
+ if location == "EU" {
+ location = "eu-west-1"
+ }
+
+	return location, nil
+}
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
+ // Set location query.
+ urlValues := make(url.Values)
+ urlValues.Set("location", "")
+
+ // Set get bucket location always as path style.
+ targetURL, err := url.Parse(c.endpointURL)
+ if err != nil {
+ return nil, err
+ }
+ targetURL.Path = path.Join(bucketName, "") + "/"
+ targetURL.RawQuery = urlValues.Encode()
+
+ // Get a new HTTP request for the method.
+ req, err := http.NewRequest("GET", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ var contentSha256 string
+ if c.secure {
+ contentSha256 = unsignedPayload
+ } else {
+ contentSha256 = hex.EncodeToString(sum256([]byte{}))
+ }
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+ return req, nil
+}
diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go
new file mode 100644
index 000000000..81cfbc097
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go
@@ -0,0 +1,323 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/xml"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "testing"
+)
+
+// Test validates `newBucketLocationCache`.
+func TestNewBucketLocationCache(t *testing.T) {
+ expectedBucketLocationcache := &bucketLocationCache{
+ items: make(map[string]string),
+ }
+ actualBucketLocationCache := newBucketLocationCache()
+
+ if !reflect.DeepEqual(actualBucketLocationCache, expectedBucketLocationcache) {
+ t.Errorf("Unexpected return value")
+ }
+}
+
+// Tests validate bucketLocationCache operations.
+func TestBucketLocationCacheOps(t *testing.T) {
+ testBucketLocationCache := newBucketLocationCache()
+ expectedBucketName := "minio-bucket"
+ expectedLocation := "us-east-1"
+ testBucketLocationCache.Set(expectedBucketName, expectedLocation)
+ actualLocation, ok := testBucketLocationCache.Get(expectedBucketName)
+ if !ok {
+ t.Errorf("Bucket location cache not set")
+ }
+ if expectedLocation != actualLocation {
+ t.Errorf("Bucket location cache not set to expected value")
+ }
+ testBucketLocationCache.Delete(expectedBucketName)
+ _, ok = testBucketLocationCache.Get(expectedBucketName)
+ if ok {
+ t.Errorf("Bucket location cache not deleted as expected")
+ }
+}
+
+// Tests validate http request generation for 'getBucketLocation'.
+func TestGetBucketLocationRequest(t *testing.T) {
+ // Generates expected http request for getBucketLocation.
+ // Used for asserting with the actual request generated.
+ createExpectedRequest := func(c *Client, bucketName string, req *http.Request) (*http.Request, error) {
+ // Set location query.
+ urlValues := make(url.Values)
+ urlValues.Set("location", "")
+
+ // Set get bucket location always as path style.
+ targetURL, err := url.Parse(c.endpointURL)
+ if err != nil {
+ return nil, err
+ }
+ targetURL.Path = path.Join(bucketName, "") + "/"
+ targetURL.RawQuery = urlValues.Encode()
+
+ // Get a new HTTP request for the method.
+ req, err = http.NewRequest("GET", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+ return req, nil
+
+ }
+ // Info for 'Client' creation.
+ // Will be used as arguments for 'NewClient'.
+ type infoForClient struct {
+ endPoint string
+ accessKey string
+ secretKey string
+ enableInsecure bool
+ }
+ // dataset for 'NewClient' call.
+ info := []infoForClient{
+ // endpoint localhost.
+ // both access-key and secret-key are empty.
+ {"localhost:9000", "", "", false},
+		// both access-key and secret-key exist.
+ {"localhost:9000", "my-access-key", "my-secret-key", false},
+		// one of access-key and secret-key is empty.
+ {"localhost:9000", "", "my-secret-key", false},
+
+ // endpoint amazon s3.
+ {"s3.amazonaws.com", "", "", false},
+ {"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
+		{"s3.amazonaws.com", "my-access-key", "", false},
+
+ // endpoint google cloud storage.
+ {"storage.googleapis.com", "", "", false},
+ {"storage.googleapis.com", "my-access-key", "my-secret-key", false},
+ {"storage.googleapis.com", "", "my-secret-key", false},
+
+ // endpoint custom domain running Minio server.
+ {"play.minio.io", "", "", false},
+ {"play.minio.io", "my-access-key", "my-secret-key", false},
+		{"play.minio.io", "my-access-key", "", false},
+ }
+ testCases := []struct {
+ bucketName string
+ // data for new client creation.
+ info infoForClient
+ // error in the output.
+ err error
+ // flag indicating whether tests should pass.
+ shouldPass bool
+ }{
+ // Client is constructed using the info struct.
+ // case with empty location.
+ {"my-bucket", info[0], nil, true},
+ // case with location set to standard 'us-east-1'.
+ {"my-bucket", info[0], nil, true},
+ // case with location set to a value different from 'us-east-1'.
+ {"my-bucket", info[0], nil, true},
+
+ {"my-bucket", info[1], nil, true},
+ {"my-bucket", info[1], nil, true},
+ {"my-bucket", info[1], nil, true},
+
+ {"my-bucket", info[2], nil, true},
+ {"my-bucket", info[2], nil, true},
+ {"my-bucket", info[2], nil, true},
+
+ {"my-bucket", info[3], nil, true},
+ {"my-bucket", info[3], nil, true},
+ {"my-bucket", info[3], nil, true},
+
+ {"my-bucket", info[4], nil, true},
+ {"my-bucket", info[4], nil, true},
+ {"my-bucket", info[4], nil, true},
+
+ {"my-bucket", info[5], nil, true},
+ {"my-bucket", info[5], nil, true},
+ {"my-bucket", info[5], nil, true},
+
+ {"my-bucket", info[6], nil, true},
+ {"my-bucket", info[6], nil, true},
+ {"my-bucket", info[6], nil, true},
+
+ {"my-bucket", info[7], nil, true},
+ {"my-bucket", info[7], nil, true},
+ {"my-bucket", info[7], nil, true},
+
+ {"my-bucket", info[8], nil, true},
+ {"my-bucket", info[8], nil, true},
+ {"my-bucket", info[8], nil, true},
+
+ {"my-bucket", info[9], nil, true},
+ {"my-bucket", info[9], nil, true},
+ {"my-bucket", info[9], nil, true},
+
+ {"my-bucket", info[10], nil, true},
+ {"my-bucket", info[10], nil, true},
+ {"my-bucket", info[10], nil, true},
+
+ {"my-bucket", info[11], nil, true},
+ {"my-bucket", info[11], nil, true},
+ {"my-bucket", info[11], nil, true},
+ }
+ for i, testCase := range testCases {
+		// Cannot create a new client with an empty endPoint value;
+		// validate and create a new client only when the endPoint value is non-empty.
+ client := &Client{}
+ var err error
+ if testCase.info.endPoint != "" {
+
+ client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
+ if err != nil {
+ t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
+ }
+ }
+
+ actualReq, err := client.getBucketLocationRequest(testCase.bucketName)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+				t.Errorf("Test %d: Expected to fail with error \"%s\", but failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ expectedReq := &http.Request{}
+ expectedReq, err = createExpectedRequest(client, testCase.bucketName, expectedReq)
+ if err != nil {
+				t.Fatalf("Test %d: Expected request creation failed", i+1)
+ }
+ if expectedReq.Method != actualReq.Method {
+ t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
+ }
+ if expectedReq.URL.String() != actualReq.URL.String() {
+ t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
+ }
+ if expectedReq.ContentLength != actualReq.ContentLength {
+ t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
+ }
+
+ if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
+ t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match with that of the actual request", i+1)
+ }
+ if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
+				t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\", but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
+ }
+ }
+ }
+}
+
+// generates http response with bucket location set in the body.
+func generateLocationResponse(resp *http.Response, bodyContent []byte) (*http.Response, error) {
+ resp.StatusCode = http.StatusOK
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyContent))
+ return resp, nil
+}
+
+// Tests the processing of the GetBucketLocation response from the server.
+func TestProcessBucketLocationResponse(t *testing.T) {
+ // LocationResponse - format for location response.
+ type LocationResponse struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"`
+ Location string `xml:",chardata"`
+ }
+
+ APIErrors := []APIError{
+ {
+ Code: "AccessDenied",
+ Description: "Access Denied",
+ HTTPStatusCode: http.StatusUnauthorized,
+ },
+ }
+ testCases := []struct {
+ bucketName string
+ inputLocation string
+ isAPIError bool
+ apiErr APIError
+ // expected results.
+ expectedResult string
+ err error
+ // flag indicating whether tests should pass.
+ shouldPass bool
+ }{
+ {"my-bucket", "", true, APIErrors[0], "us-east-1", nil, true},
+ {"my-bucket", "", false, APIError{}, "us-east-1", nil, true},
+ {"my-bucket", "EU", false, APIError{}, "eu-west-1", nil, true},
+ {"my-bucket", "eu-central-1", false, APIError{}, "eu-central-1", nil, true},
+ {"my-bucket", "us-east-1", false, APIError{}, "us-east-1", nil, true},
+ }
+
+ for i, testCase := range testCases {
+ inputResponse := &http.Response{}
+ var err error
+ if testCase.isAPIError {
+ inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
+ } else {
+ inputResponse, err = generateLocationResponse(inputResponse, encodeResponse(LocationResponse{
+ Location: testCase.inputLocation,
+ }))
+ if err != nil {
+ t.Fatalf("Test %d: Creation of valid response failed", i+1)
+ }
+ }
+ actualResult, err := processBucketLocationResponse(inputResponse, "my-bucket")
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+ if err == nil && testCase.shouldPass {
+ if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
+				t.Errorf("Test %d: The expected bucket location doesn't match the actual bucket location", i+1)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
new file mode 100644
index 000000000..121a63a77
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -0,0 +1,228 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "reflect"
+)
+
+// NotificationEventType is an S3 notification event associated with the bucket notification configuration.
+type NotificationEventType string
+
+// The role of all event types is described in:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+ ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
+	ObjectCreatedPut                                           = "s3:ObjectCreated:Put"
+ ObjectCreatedPost = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload                       = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectRemovedAll = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+ FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+ S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service,
+// ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+ Partition string
+ Service string
+ Region string
+ AccountID string
+ Resource string
+}
+
+// NewArn creates a new ARN based on the given partition, service, region, account ID and resource.
+func NewArn(partition, service, region, accountID, resource string) Arn {
+ return Arn{Partition: partition,
+ Service: service,
+ Region: region,
+ AccountID: accountID,
+ Resource: resource}
+}
+
+// String returns the string format of the ARN.
+func (arn Arn) String() string {
+ return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
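+
+// A minimal usage sketch (illustrative only): constructing an ARN for a
+// hypothetical SNS topic and rendering it as a string.
+//
+//	arn := NewArn("aws", "sns", "us-east-1", "123456789012", "my-topic")
+//	fmt.Println(arn.String()) // prints "arn:aws:sns:us-east-1:123456789012:my-topic"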
+
+// NotificationConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationConfig struct {
+ Id string `xml:"Id,omitempty"`
+ Arn Arn `xml:"-"`
+ Events []NotificationEventType `xml:"Event"`
+ Filter *Filter `xml:"Filter,omitempty"`
+}
+
+// NewNotificationConfig creates one notification config and sets the given ARN
+func NewNotificationConfig(arn Arn) NotificationConfig {
+ return NotificationConfig{Arn: arn}
+}
+
+// AddEvents adds one or more events to the current notification config.
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
+ t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix filter on the current notification config.
+func (t *NotificationConfig) AddFilterSuffix(suffix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+	// Replace any existing suffix rule, otherwise add it to the list.
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix filter on the current notification config.
+func (t *NotificationConfig) AddFilterPrefix(prefix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+	// Replace any existing prefix rule, otherwise add it to the list.
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
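+
+// A minimal usage sketch (illustrative only; queueArn stands for a
+// previously constructed Arn value):
+//
+//	config := NewNotificationConfig(queueArn)
+//	config.AddEvents(ObjectCreatedAll, ObjectRemovedAll)
+//	config.AddFilterPrefix("photos/")
+//	config.AddFilterSuffix(".jpg")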
+
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+ NotificationConfig
+ Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+ NotificationConfig
+ Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+ NotificationConfig
+ Lambda string `xml:"CloudFunction"`
+}
+
+// BucketNotification - the struct that represents the whole XML to be sent to the web service
+type BucketNotification struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+ TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
+ QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
+ newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
+ for _, n := range b.TopicConfigs {
+ if reflect.DeepEqual(n, newTopicConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
+ newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
+ for _, n := range b.QueueConfigs {
+ if reflect.DeepEqual(n, newQueueConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
+ newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+ for _, n := range b.LambdaConfigs {
+ if reflect.DeepEqual(n, newLambdaConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+}
+
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
+ var topics []TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
new file mode 100644
index 000000000..779ed8c7a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -0,0 +1,46 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+/// Multipart upload defaults.
+
+// minPartSize - minimum part size of 64MiB per object, after which
+// putObject behaves internally as multipart.
+const minPartSize = 1024 * 1024 * 64
+
+// maxPartsCount - maximum number of parts for a single multipart session.
+const maxPartsCount = 10000
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload
+// operation.
+const maxPartSize = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
+// operation.
+const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for
+// Multipart operation.
+const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// optimalReadBufferSize - optimal buffer 5MiB used for reading
+// through Read operation.
+const optimalReadBufferSize = 1024 * 1024 * 5
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
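+
+// Note (illustrative arithmetic, not part of the library): the 64MiB minimum
+// part size alone cannot cover the 5TiB limit, since 5TiB / 64MiB = 81920
+// parts, which exceeds maxPartsCount. Part sizes must therefore grow with the
+// object size; a 5TiB object needs parts of at least 5TiB / 10000 ≈ 525MiB each.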
diff --git a/vendor/github.com/minio/minio-go/copy-conditions.go b/vendor/github.com/minio/minio-go/copy-conditions.go
new file mode 100644
index 000000000..5dcdfaef0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/copy-conditions.go
@@ -0,0 +1,97 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "time"
+)
+
+// copyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
+//
+// Example:
+//
+// copyCondition {
+// key: "x-amz-copy-if-modified-since",
+// value: "Tue, 15 Nov 1994 12:45:26 GMT",
+// }
+//
+type copyCondition struct {
+ key string
+ value string
+}
+
+// CopyConditions - copy conditions.
+type CopyConditions struct {
+ conditions []copyCondition
+}
+
+// NewCopyConditions - Instantiate a new list of conditions.
+func NewCopyConditions() CopyConditions {
+ return CopyConditions{
+ conditions: make([]copyCondition, 0),
+ }
+}
+
+// SetMatchETag - set match etag.
+func (c *CopyConditions) SetMatchETag(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-match",
+ value: etag,
+ })
+ return nil
+}
+
+// SetMatchETagExcept - set match etag except.
+func (c *CopyConditions) SetMatchETagExcept(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-none-match",
+ value: etag,
+ })
+ return nil
+}
+
+// SetUnmodified - set unmodified-since time condition.
+func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Unmodified since cannot be empty.")
+	}
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-unmodified-since",
+ value: modTime.Format(http.TimeFormat),
+ })
+ return nil
+}
+
+// SetModified - set modified-since time condition.
+func (c *CopyConditions) SetModified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-modified-since",
+ value: modTime.Format(http.TimeFormat),
+ })
+ return nil
+}
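+
+// A minimal usage sketch (illustrative only):
+//
+//	conds := NewCopyConditions()
+//	if err := conds.SetMatchETag("31624deb84149d2f8ef9c385918b653a"); err != nil {
+//		// handle the invalid argument
+//	}
+//	// conds can now be passed to CopyObject.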
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
new file mode 100644
index 000000000..9977c5df9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -0,0 +1,1101 @@
+# Golang Client API Reference [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## Initialize Minio Client object.
+
+## Minio
+
+```go
+
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Use a secure connection.
+ ssl := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+
+```
+
+## AWS S3
+
+```go
+
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Use a secure connection.
+ ssl := true
+
+ // Initialize minio client object.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+
+```
+
+| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations |
+|:---|:---|:---|:---|
+|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) |
+|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) |
+|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) |
+| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) |
+|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) |
+|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
+|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
+| | [`FPutObject`](#FPutObject) | | |
+| | [`FGetObject`](#FGetObject) | | |
+
+## 1. Constructor
+<a name="Minio"></a>
+
+### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*Client, error)
+Initializes a new client object.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`endpoint` | _string_ |S3 object storage endpoint. |
+| `accessKeyID` |_string_ | Access key for the object storage endpoint. |
+| `secretAccessKey` | _string_ |Secret key for the object storage endpoint. |
+|`ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. |
+
+
+## 2. Bucket operations
+
+<a name="MakeBucket"></a>
+### MakeBucket(bucketName string, location string) error
+Creates a new bucket.
+
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`bucketName` | _string_ | Name of the bucket. |
+| `location`  | _string_ | Region where the bucket is to be created. Default value is us-east-1. Valid values are listed below:|
+| | |us-east-1 |
+| | |us-west-1 |
+| | |us-west-2 |
+| | |eu-west-1 |
+| | | eu-central-1|
+| | | ap-southeast-1|
+| | | ap-northeast-1|
+| | | ap-southeast-2|
+| | | sa-east-1|
+
+
+__Example__
+
+
+```go
+
+err := minioClient.MakeBucket("mybucket", "us-east-1")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully created mybucket.")
+
+```
+
+<a name="ListBuckets"></a>
+### ListBuckets() ([]BucketInfo, error)
+
+Lists all buckets.
+
+| Param | Type | Description |
+|---|---|---|
+|`bucketList`  | _[]BucketInfo_ | List of buckets in the format shown below: |
+
+
+| Param | Type | Description |
+|---|---|---|
+|`bucket.Name` | _string_ | bucket name. |
+|`bucket.CreationDate` | _time.Time_ | date when bucket was created. |
+
+
+__Example__
+
+
+```go
+
+buckets, err := minioClient.ListBuckets()
+if err != nil {
+    fmt.Println(err)
+    return
+}
+for _, bucket := range buckets {
+    fmt.Println(bucket)
+}
+
+```
+
+<a name="BucketExists"></a>
+### BucketExists(bucketName string) (found bool, err error)
+
+Checks if a bucket exists.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`found` | _bool_ | indicates whether bucket exists or not |
+|`err` | _error_ | standard error |
+
+
+__Example__
+
+
+```go
+
+found, err := minioClient.BucketExists("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+if found {
+ fmt.Println("Bucket found")
+}
+
+```
+
+<a name="RemoveBucket"></a>
+### RemoveBucket(bucketName string) error
+
+Removes a bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+
+__Example__
+
+
+```go
+
+err := minioClient.RemoveBucket("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="ListObjects"></a>
+### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+
+Lists objects in a bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
+| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
+|`doneCh`  | _chan struct{}_ | A message on this channel ends the ListObjects loop. |
+
+
+__Return Value__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan ObjectInfo` | _chan ObjectInfo_ | Read channel for all objects in the bucket; each object is in the format listed below: |
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objectInfo.Key` | _string_ |name of the object. |
+|`objectInfo.Size` | _int64_ |size of the object. |
+|`objectInfo.ETag` | _string_ |etag of the object. |
+|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+
+
+```go
+
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := minioClient.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+
+```
+
+
+<a name="ListObjectsV2"></a>
+### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+
+Lists objects in a bucket using the recommended listing API v2.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
+| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
+|`doneCh`  | _chan struct{}_ | A message on this channel ends the ListObjectsV2 loop. |
+
+
+__Return Value__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan ObjectInfo` | _chan ObjectInfo_ | Read channel for all objects in the bucket; each object is in the format listed below: |
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objectInfo.Key` | _string_ |name of the object. |
+|`objectInfo.Size` | _int64_ |size of the object. |
+|`objectInfo.ETag` | _string_ |etag of the object. |
+|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+
+
+```go
+
+// Create a done channel to control 'ListObjectsV2' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := minioClient.ListObjectsV2("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+
+```
+
+<a name="ListIncompleteUploads"></a>
+### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
+
+Lists partially uploaded objects in a bucket.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+| `prefix` |_string_ | prefix of the object names that are partially uploaded |
+| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
+|`doneCh`  | _chan struct{}_ | A message on this channel ends the ListIncompleteUploads loop. |
+
+
+__Return Value__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |emits multipart objects of the format listed below: |
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`multiPartObjInfo.Key` | _string_ |name of the incomplete object. |
+|`multiPartObjInfo.UploadID` | _string_ |upload ID of the incomplete object.|
+|`multiPartObjInfo.Size` | _int64_ |size of the incompletely uploaded object.|
+
+__Example__
+
+
+```go
+
+// Create a done channel to control 'ListIncompleteUploads' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true // Recursively list everything at 'myprefix'
+multiPartObjectCh := minioClient.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
+for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ fmt.Println(multiPartObject.Err)
+ return
+ }
+ fmt.Println(multiPartObject)
+}
+
+```
+
+## 3. Object operations
+
+<a name="GetObject"></a>
+### GetObject(bucketName string, objectName string) (*Object, error)
+
+Downloads an object.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+
+
+__Return Value__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`object`  | _*minio.Object_ | _minio.Object_ represents an object reader. |
+
+
+__Example__
+
+
+```go
+
+object, err := minioClient.GetObject("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="FGetObject"></a>
+### FGetObject(bucketName string, objectName string, filePath string) error
+Downloads and saves the object as a file in the local filesystem.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+|`filePath` | _string_ | path to which the object data will be written. |
+
+
+__Example__
+
+
+```go
+
+err := minioClient.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="PutObject"></a>
+### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int64, err error)
+
+Uploads an object.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+|`reader` | _io.Reader_ | any Go type that implements io.Reader. |
+|`contentType` | _string_ |content type of the object. |
+
+
+__Example__
+
+
+Uploads objects that are less than 64MiB in a single PUT operation. For objects that are larger than 64MiB, PutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The maximum upload size for an object is 5TiB.
+
+In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. In that case, the PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
+
+
+```go
+
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+n, err := minioClient.PutObject("mybucket", "myobject", file, "application/octet-stream")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+
+<a name="CopyObject"></a>
+### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
+
+Copy a source object into a new object with the provided name in the provided bucket.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+|`objectSource` | _string_ |name of the object source. |
+|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`].|
+
+
+__Example__
+
+
+```go
+
+// All following conditions are allowed and can be combined together.
+
+// Set copy conditions.
+var copyConds = minio.NewCopyConditions()
+// Set modified condition, copy object modified since 2014 April.
+copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+// Set unmodified condition, copy object unmodified since 2014 April.
+// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+// Set matching ETag condition, copy object which matches the following ETag.
+// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+
+// Set matching ETag except condition, copy object which does not match the following ETag.
+// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+
+err := minioClient.CopyObject("mybucket", "myobject", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="FPutObject"></a>
+### FPutObject(bucketName string, objectName string, filePath string, contentType string) (n int64, err error)
+
+Uploads contents from a file to objectName.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+|`filePath` | _string_ |file path of the file to be uploaded. |
+|`contentType` | _string_ |content type of the object. |
+
+
+__Example__
+
+
+FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are larger than 64MiB, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The maximum upload size for an object is 5TiB.
+
+In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. In that case, the FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
+
+```go
+
+n, err := minioClient.FPutObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", "application/csv")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="StatObject"></a>
+### StatObject(bucketName string, objectName string) (ObjectInfo, error)
+
+Gets metadata of an object.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+
+
+__Return Value__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objInfo`  | _ObjectInfo_  | object stat info in the format listed below: |
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objInfo.ETag` | _string_ |etag of the object.|
+|`objInfo.ContentType` | _string_ |Content-Type of the object.|
+|`objInfo.Size` | _int64_ |size of the object.|
+
+
+__Example__
+
+
+```go
+
+objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println(objInfo)
+
+```
+
+<a name="RemoveObject"></a>
+### RemoveObject(bucketName string, objectName string) error
+
+Removes an object.
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+
+
+```go
+
+err := minioClient.RemoveObject("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError)
+
+Removes a list of objects obtained from an input channel. The call internally buffers up to `1000` objects at a time before initiating a delete request to the server. Any error encountered is sent through the error channel.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectsCh` | _chan string_ | write channel for object names to be removed |
+
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`errorCh` | _chan minio.RemoveObjectError_ | read channel for object deletion errors |
+
+
+
+```go
+
+objectsCh := make(chan string)
+
+// Send the object names to be removed to objectsCh.
+go func() {
+    defer close(objectsCh)
+    objectsCh <- "my-objectname"
+}()
+
+errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
+for e := range errorCh {
+    fmt.Println("Error detected during deletion: " + e.Err.Error())
+}
+
+```
+
+
+
+<a name="RemoveIncompleteUpload"></a>
+### RemoveIncompleteUpload(bucketName string, objectName string) error
+
+Removes a partially uploaded object.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+
+__Example__
+
+
+```go
+
+err := minioClient.RemoveIncompleteUpload("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+## 4. Presigned operations
+
+
+<a name="PresignedGetObject"></a>
+### PresignedGetObject(bucketName string, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+
+Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time after which it is no longer operational. The maximum expiry is 7 days.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+|`expiry` | _time.Duration_ | expiry duration of the presigned URL. |
+|`reqParams` | _url.Values_ |additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
+
+
+__Example__
+
+
+```go
+
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedGetObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="PresignedPutObject"></a>
+### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
+
+Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time after which it is no longer operational. The maximum expiry is 7 days.
+
+NOTE: you can upload to S3 only with the specified object name.
+
+
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectName` | _string_ |name of the object. |
+|`expiry` | _time.Duration_ | expiry duration of the presigned URL. |
+
+
+__Example__
+
+
+```go
+
+// Generates a url which expires in a day.
+expiry := time.Second * 24 * 60 * 60 // 1 day.
+presignedURL, err := minioClient.PresignedPutObject("mybucket", "myobject", expiry)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println(presignedURL)
+
+```
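+
+The returned URL can then be used with any HTTP client. As a minimal sketch (assuming a hypothetical local file `/tmp/myfile`), an equivalent `curl` upload command can be printed like this:
+
+```go
+fmt.Printf("curl -X PUT -T /tmp/myfile '%s'\n", presignedURL)
+```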
+
+<a name="PresignedPostPolicy"></a>
+### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+
+Allows setting policy conditions on a presigned URL for POST operations. Conditions such as the bucket name that receives object uploads, key name prefixes, and an expiry policy may be set.
+
+Create policy:
+
+
+```go
+
+policy := minio.NewPostPolicy()
+
+```
+
+Apply upload policy restrictions:
+
+
+```go
+
+policy.SetBucket("mybucket")
+policy.SetKey("myobject")
+policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+
+// Only allow 'png' images.
+policy.SetContentType("image/png")
+
+// Only allow content size in range 1KB to 1MB.
+policy.SetContentLengthRange(1024, 1024*1024)
+
+// Get the POST form key/value object:
+
+url, formData, err := minioClient.PresignedPostPolicy(policy)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+
+POST your content from the command line using `curl`:
+
+
+```go
+fmt.Printf("curl ")
+for k, v := range formData {
+ fmt.Printf("-F %s=%s ", k, v)
+}
+fmt.Printf("-F file=@/etc/bash.bashrc ")
+fmt.Printf("%s\n", url)
+```
+
+## 5. Bucket policy/notification operations
+
+<a name="SetBucketPolicy"></a>
+### SetBucketPolicy(bucketname string, objectPrefix string, policy policy.BucketPolicy) error
+
+Set access permissions on a bucket or an object prefix.
+
+Importing the `github.com/minio/minio-go/pkg/policy` package is needed.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket.|
+|`objectPrefix` | _string_ |name of the object prefix.|
+|`policy` | _policy.BucketPolicy_ |policy can be:|
+|| |policy.BucketPolicyNone|
+| | |policy.BucketPolicyReadOnly|
+|| |policy.BucketPolicyReadWrite|
+| | |policy.BucketPolicyWriteOnly|
+
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`err` | _error_ |standard error |
+
+
+__Example__
+
+
+```go
+
+err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+```
+
+<a name="GetBucketPolicy"></a>
+### GetBucketPolicy(bucketName string, objectPrefix string) (policy.BucketPolicy, error)
+
+Get access permissions on a bucket or a prefix.
+
+Importing the `github.com/minio/minio-go/pkg/policy` package is needed.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectPrefix` | _string_ |name of the object prefix |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketPolicy` | _policy.BucketPolicy_ |string that contains: `none`, `readonly`, `readwrite`, or `writeonly` |
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+
+bucketPolicy, err := minioClient.GetBucketPolicy("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Access permissions for mybucket is", bucketPolicy)
+
+```
+
+<a name="ListBucketPolicies"></a>
+### ListBucketPolicies(bucketName string, objectPrefix string) (map[string]BucketPolicy, error)
+
+Get access permission rules associated with the specified bucket and prefix.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectPrefix` | _string_ |name of the object prefix |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketPolicies` | _map[string]BucketPolicy_ |map that contains object resources paths with their associated permissions |
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+
+bucketPolicies, err := minioClient.ListBucketPolicies("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for resource, permission := range bucketPolicies {
+ fmt.Println(resource, " => ", permission)
+}
+
+```
+
+<a name="GetBucketNotification"></a>
+### GetBucketNotification(bucketName string) (BucketNotification, error)
+
+Get all notification configurations related to the specified bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+bucketNotification, err := minioClient.GetBucketNotification("mybucket")
+if err != nil {
+    fmt.Println(err)
+    return
+}
+for _, topicConfig := range bucketNotification.TopicConfigs {
+    for _, e := range topicConfig.Events {
+        fmt.Println(e + " event is enabled")
+    }
+}
+```
+
+<a name="SetBucketNotification"></a>
+### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error
+
+Set a new bucket notification on a bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`bucketNotification` | _BucketNotification_ |bucket notification. |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+topicArn := minio.NewArn("aws", "sns", "us-east-1", "804605494417", "PhotoUpdate")
+
+topicConfig := minio.NewNotificationConfig(topicArn)
+topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+topicConfig.AddFilterPrefix("photos/")
+topicConfig.AddFilterSuffix(".jpg")
+
+bucketNotification := minio.BucketNotification{}
+bucketNotification.AddTopic(topicConfig)
+err := minioClient.SetBucketNotification("mybucket", bucketNotification)
+if err != nil {
+    fmt.Println("Cannot set the bucket notification: " + err.Error())
+}
+```
+
+<a name="RemoveAllBucketNotification"></a>
+### RemoveAllBucketNotification(bucketName string) error
+
+Remove all configured bucket notifications on a bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+err := minioClient.RemoveAllBucketNotification("mybucket")
+if err != nil {
+    fmt.Println("Cannot remove bucket notifications.")
+}
+```
+
+<a name="ListenBucketNotification"></a>
+### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
+
+ListenBucketNotification API receives bucket notification events through the
+notification channel. The returned notification channel has two fields
+'Records' and 'Err'.
+
+- 'Records' holds the notifications received from the server.
+- 'Err' indicates any error while processing the received notifications.
+
+NOTE: Notification channel is closed at the first occurrence of an error.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ | Bucket to listen notifications from. |
+|`prefix` | _string_ | Object key prefix to filter notifications for. |
+|`suffix` | _string_ | Object key suffix to filter notifications for. |
+|`events` | _[]string_| Enables notifications for specific event types. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification loop. |
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan NotificationInfo` | _chan_ | Read channel for all notifications on the bucket. |
+|`NotificationInfo` | _object_ | Notification object that represents event info. |
+|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events. |
+|`notificationInfo.Err` | _error_ | Carries any error that occurred during the operation. |
+
+
+__Example__
+
+
+```go
+
+// Create a done channel to control 'ListenBucketNotification' go routine.
+doneCh := make(chan struct{})
+
+// Indicate a background go-routine to exit cleanly upon return.
+defer close(doneCh)
+
+// Listen for bucket notifications on "YOUR-BUCKET" filtered by prefix, suffix and events.
+for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*",
+}, doneCh) {
+ if notificationInfo.Err != nil {
+ log.Fatalln(notificationInfo.Err)
+ }
+ log.Println(notificationInfo)
+}
+```
+
+## 6. Explore Further
+
+- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
+
diff --git a/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go b/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go
new file mode 100644
index 000000000..b682dcb42
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go
@@ -0,0 +1,59 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+	// minioClient.TraceOn(os.Stderr)
+
+ // Create a done channel to control 'ListenBucketNotification' go routine.
+ doneCh := make(chan struct{})
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+	// Listen for bucket notifications on "YOUR-BUCKET" filtered by prefix, suffix and events.
+ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*",
+ }, doneCh) {
+ if notificationInfo.Err != nil {
+ log.Fatalln(notificationInfo.Err)
+ }
+ log.Println(notificationInfo)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go b/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go
new file mode 100644
index 000000000..945510db8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go
@@ -0,0 +1,51 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ found, err := s3Client.BucketExists("my-bucketname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if found {
+ log.Println("Bucket found.")
+ } else {
+ log.Println("Bucket not found.")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
new file mode 100644
index 000000000..9f9e5bc4f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
@@ -0,0 +1,67 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Enable trace.
+ // s3Client.TraceOn(os.Stderr)
+
+ // All following conditions are allowed and can be combined together.
+
+ // Set copy conditions.
+ var copyConds = minio.NewCopyConditions()
+ // Set modified condition, copy object modified since 2014 April.
+ copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+ // Set unmodified condition, copy object unmodified since 2014 April.
+ // copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+ // Set matching ETag condition, copy object which matches the following ETag.
+ // copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+
+ // Set matching ETag except condition, copy object which does not match the following ETag.
+ // copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+
+ // Initiate copy object.
+ err = s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+ if err != nil {
+ log.Fatalln(err)
+ }
+	log.Println("Copied source object /my-sourcebucketname/my-sourceobjectname to destination /my-bucketname/my-objectname successfully.")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go b/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go
new file mode 100644
index 000000000..bef756dd6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully saved my-filename.csv")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputobject.go b/vendor/github.com/minio/minio-go/examples/s3/fputobject.go
new file mode 100644
index 000000000..f4e60acff
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/fputobject.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go
new file mode 100644
index 000000000..67f010ef3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go
@@ -0,0 +1,55 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ notifications, err := s3Client.GetBucketNotification("my-bucketname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+	log.Println("Bucket notifications are successfully retrieved.")
+
+ for _, topicConfig := range notifications.TopicConfigs {
+ for _, e := range topicConfig.Events {
+ log.Println(e + " event is enabled.")
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
new file mode 100644
index 000000000..e5f960403
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
@@ -0,0 +1,55 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // Fetch the policy at 'my-objectprefix'.
+ policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Description of policy output.
+ // "none" - The specified bucket does not have a bucket policy.
+ // "readonly" - Read only operations are allowed.
+ // "writeonly" - Write only operations are allowed.
+ // "readwrite" - both read and write operations are allowed, the bucket is public.
+ log.Println("Success - ", policy)
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getobject.go b/vendor/github.com/minio/minio-go/examples/s3/getobject.go
new file mode 100644
index 000000000..96bb85505
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/getobject.go
@@ -0,0 +1,63 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "io"
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+ // my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ localFile, err := os.Create("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ stat, err := reader.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
+ log.Fatalln(err)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go b/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go
new file mode 100644
index 000000000..19a2d1b2b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go
@@ -0,0 +1,56 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // Fetch the policies at 'my-objectprefix'.
+ policies, err := s3Client.ListBucketPolicies("my-bucketname", "my-objectprefix")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // ListBucketPolicies returns a map of object-prefix policy rules to their
+ // associated permissions, e.g.
+ //   mybucket/downloadfolder/* => readonly
+ //   mybucket/shared/*         => readwrite
+
+ for resource, permission := range policies {
+ log.Println(resource, " => ", permission)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go b/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go
new file mode 100644
index 000000000..81a99e627
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go
@@ -0,0 +1,48 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ buckets, err := s3Client.ListBuckets()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ for _, bucket := range buckets {
+ log.Println(bucket)
+ }
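+
+ // Each entry is a BucketInfo; its fields can also be used directly,
+ // for example (Name and CreationDate per this library's data types):
+ for _, bucket := range buckets {
+ log.Println(bucket.Name, "created on", bucket.CreationDate)
+ }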
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go b/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
new file mode 100644
index 000000000..34771e44b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
@@ -0,0 +1,57 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Create a done channel to control 'ListIncompleteUploads' go routine.
+ doneCh := make(chan struct{})
+
+ // Signal our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all multipart uploads from a bucket-name with a matching prefix.
+ for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
+ if multipartObject.Err != nil {
+ fmt.Println(multipartObject.Err)
+ return
+ }
+ fmt.Println(multipartObject)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go b/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go
new file mode 100644
index 000000000..5dde36746
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go
@@ -0,0 +1,76 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // List 'N' number of objects from a bucket-name with a matching prefix.
+ listObjectsN := func(bucket, prefix string, recursive bool, N int) (objsInfo []minio.ObjectInfo, err error) {
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{}, 1)
+
+ // Close the channel upon return.
+ defer close(doneCh)
+
+ i := 0
+ for object := range s3Client.ListObjects(bucket, prefix, recursive, doneCh) {
+ if object.Err != nil {
+ return nil, object.Err
+ }
+ objsInfo = append(objsInfo, object)
+ i++
+ // Stop once exactly N objects have been collected.
+ if i == N {
+ // Indicate the ListObjects go-routine to exit and stop
+ // feeding the objectInfo channel.
+ doneCh <- struct{}{}
+ break
+ }
+ }
+ return objsInfo, nil
+ }
+
+ // Recursively list the first 100 entries under the prefix 'my-prefixname'.
+ recursive := true
+ objsInfo, err := listObjectsN("my-bucketname", "my-prefixname", recursive, 100)
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ // Print all the entries.
+ fmt.Println(objsInfo)
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjects.go b/vendor/github.com/minio/minio-go/examples/s3/listobjects.go
new file mode 100644
index 000000000..4fd5c069a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/listobjects.go
@@ -0,0 +1,57 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{})
+
+ // Signal our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go b/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go
new file mode 100644
index 000000000..b52b4dab8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go
@@ -0,0 +1,57 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{})
+
+ // Signal our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range s3Client.ListObjectsV2("my-bucketname", "my-prefixname", true, doneCh) {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/makebucket.go b/vendor/github.com/minio/minio-go/examples/s3/makebucket.go
new file mode 100644
index 000000000..ae222a8af
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/makebucket.go
@@ -0,0 +1,46 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ err = s3Client.MakeBucket("my-bucketname", "us-east-1")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go
new file mode 100644
index 000000000..11be0c0a4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go
@@ -0,0 +1,53 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Set request parameters
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+ // Generate a presigned GET object URL.
+ presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(presignedURL)
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
new file mode 100644
index 000000000..3f37cef38
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
@@ -0,0 +1,59 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ policy := minio.NewPostPolicy()
+ policy.SetBucket("my-bucketname")
+ policy.SetKey("my-objectname")
+ // Expires in 10 days.
+ policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10))
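+ // Optionally bound the accepted upload size; a sketch using this
+ // library's SetContentLengthRange (here 1 KiB to 1 MiB).
+ policy.SetContentLengthRange(1024, 1024*1024)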
+ // Returns form data for POST form request.
+ url, formData, err := s3Client.PresignedPostPolicy(policy)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("curl ")
+ for k, v := range formData {
+ fmt.Printf("-F %s=%s ", k, v)
+ }
+ fmt.Printf("-F file=@/etc/bash.bashrc ")
+ fmt.Printf("%s\n", url)
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go
new file mode 100644
index 000000000..3db6f6e7b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go
@@ -0,0 +1,47 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(presignedURL)
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
new file mode 100644
index 000000000..f668adf70
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
@@ -0,0 +1,64 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/cheggaaa/pb"
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ objectInfo, err := reader.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // The progress reader is notified as PutObject reads data from
+ // the source. For a partially resumed upload, the progress reader
+ // is advanced to the resume offset accordingly.
+ progress := pb.New64(objectInfo.Size)
+ progress.Start()
+
+ n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, "application/octet-stream", progress)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject.go b/vendor/github.com/minio/minio-go/examples/s3/putobject.go
new file mode 100644
index 000000000..caa731302
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject.go
@@ -0,0 +1,53 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ object, err := os.Open("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer object.Close()
+
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go
new file mode 100644
index 000000000..0f5f3a74d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go
@@ -0,0 +1,49 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ err = s3Client.RemoveAllBucketNotification("my-bucketname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Println("Bucket notification are successfully removed.")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removebucket.go b/vendor/github.com/minio/minio-go/examples/s3/removebucket.go
new file mode 100644
index 000000000..fb013ca24
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/removebucket.go
@@ -0,0 +1,48 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // This operation will only work if your bucket is empty.
+ err = s3Client.RemoveBucket("my-bucketname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
new file mode 100644
index 000000000..d486182af
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
@@ -0,0 +1,46 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ err = s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeobject.go b/vendor/github.com/minio/minio-go/examples/s3/removeobject.go
new file mode 100644
index 000000000..13b00b41e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeobject.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ err = s3Client.RemoveObject("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go b/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go
new file mode 100644
index 000000000..594606929
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go
@@ -0,0 +1,61 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "strconv"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ objectsCh := make(chan string)
+
+ // Send the names of the objects to be removed to objectsCh.
+ go func() {
+ defer close(objectsCh)
+ for i := 0; i < 10; i++ {
+ objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i)
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := s3Client.RemoveObjects("my-bucketname", objectsCh)
+
+ // Exit on the first removal error received from the RemoveObjects API.
+ for e := range errorCh {
+ log.Fatalln("Failed to remove " + e.ObjectName + ", error: " + e.Err.Error())
+ }
+
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go
new file mode 100644
index 000000000..5fe1e318e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go
@@ -0,0 +1,85 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // ARN represents a notification channel that needs to be created in your S3 provider
+ // (e.g. http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html)
+
+ // An example of an ARN:
+ // arn:aws:sns:us-east-1:804064459714:UploadPhoto
+ //     ^   ^   ^         ^            ^
+ //     |   |   |         |            |_ Notification Name
+ //     |   |   |         |_ Account ID
+ //     |   |   |_ Region
+ //     |   |_ Service
+ //     |_ Provider
+ //
+ // You should replace YOUR-PROVIDER, YOUR-SERVICE, YOUR-REGION, YOUR-ACCOUNT-ID and YOUR-RESOURCE
+ // with actual values that you receive from the S3 provider
+
+ // Here you create a new Topic notification
+ topicArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
+ topicConfig := minio.NewNotificationConfig(topicArn)
+ topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+ topicConfig.AddFilterPrefix("photos/")
+ topicConfig.AddFilterSuffix(".jpg")
+
+ // Create a new Queue notification
+ queueArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
+ queueConfig := minio.NewNotificationConfig(queueArn)
+ queueConfig.AddEvents(minio.ObjectRemovedAll)
+
+ // Create a new Lambda (CloudFunction)
+ lambdaArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
+ lambdaConfig := minio.NewNotificationConfig(lambdaArn)
+ lambdaConfig.AddEvents(minio.ObjectRemovedAll)
+ lambdaConfig.AddFilterSuffix(".swp")
+
+ // Now, set all previously created notification configs
+ bucketNotification := minio.BucketNotification{}
+ bucketNotification.AddTopic(topicConfig)
+ bucketNotification.AddQueue(queueConfig)
+ bucketNotification.AddLambda(lambdaConfig)
+
+ err = s3Client.SetBucketNotification("YOUR-BUCKET", bucketNotification)
+ if err != nil {
+ log.Fatalln("Error: " + err.Error())
+ }
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
new file mode 100644
index 000000000..40906ee92
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
@@ -0,0 +1,54 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // Description of policy input.
+ // policy.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
+ // policy.BucketPolicyReadOnly - Set read-only operations at a prefix.
+ // policy.BucketPolicyWriteOnly - Set write-only operations at a prefix.
+ // policy.BucketPolicyReadWrite - Set read-write operations at a prefix.
+ err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", policy.BucketPolicyReadWrite)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Success")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/statobject.go b/vendor/github.com/minio/minio-go/examples/s3/statobject.go
new file mode 100644
index 000000000..4c5453a07
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/statobject.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(stat)
+}
diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go
new file mode 100644
index 000000000..bc9ece049
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/hook-reader.go
@@ -0,0 +1,70 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "io"
+
+// hookReader hooks additional reader in the source stream. It is
+// useful for making progress bars. Second reader is appropriately
+// notified about the exact number of bytes read from the primary
+// source on each Read operation.
+type hookReader struct {
+ source io.Reader
+ hook io.Reader
+}
+
+// Seek implements io.Seeker. It seeks the source if the source
+// implements io.Seeker, and otherwise falls back to seeking the hook.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+ // If the source implements io.Seeker, use it.
+ sourceSeeker, ok := hr.source.(io.Seeker)
+ if ok {
+ return sourceSeeker.Seek(offset, whence)
+ }
+ // Otherwise, if the hook implements io.Seeker, use it.
+ hookSeeker, ok := hr.hook.(io.Seeker)
+ if ok {
+ return hookSeeker.Seek(offset, whence)
+ }
+ return n, nil
+}
+
+// Read implements io.Reader. It always reads from the source; the 'n'
+// bytes read are then replayed through the hook so it can observe
+// progress. All errors other than io.EOF are returned to the caller.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+ n, err = hr.source.Read(b)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+ // Progress the hook with the total read bytes from the source.
+ if _, herr := hr.hook.Read(b[:n]); herr != nil {
+ if herr != io.EOF {
+ return n, herr
+ }
+ }
+ return n, err
+}
+
+// newHook returns an io.Reader that wraps the source and reports the
+// data read from the source to the hook. If hook is nil, the source is
+// returned as-is.
+func newHook(source, hook io.Reader) io.Reader {
+ if hook == nil {
+ return source
+ }
+ return &hookReader{source, hook}
+}
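+
+// A usage sketch (hypothetical names; the pattern mirrors the
+// examples/s3/putobject-progress.go example in this change): wrap an
+// upload source so a progress reader observes exactly the bytes read.
+//
+//  progress := pb.New64(size) // e.g. a cheggaaa/pb progress bar
+//  reader := newHook(source, progress)
+//  // every Read on 'reader' now mirrors its bytes to 'progress'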
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
new file mode 100644
index 000000000..078bcd1db
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
@@ -0,0 +1,115 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import "github.com/minio/minio-go/pkg/set"
+
+// ConditionKeyMap - map of policy condition key and value.
+type ConditionKeyMap map[string]set.StringSet
+
+// Add - adds key and value. The value is merged into the existing set if the key already exists.
+func (ckm ConditionKeyMap) Add(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ ckm[key] = v.Union(value)
+ } else {
+ ckm[key] = set.CopyStringSet(value)
+ }
+}
+
+// Remove - removes the given value of a key. If the key's value set becomes empty after removal, the key is removed as well.
+func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ if value != nil {
+ ckm[key] = v.Difference(value)
+ }
+
+ if ckm[key].IsEmpty() {
+ delete(ckm, key)
+ }
+ }
+}
+
+// RemoveKey - removes key and its value.
+func (ckm ConditionKeyMap) RemoveKey(key string) {
+ if _, ok := ckm[key]; ok {
+ delete(ckm, key)
+ }
+}
+
+// CopyConditionKeyMap - returns a new copy of the given ConditionKeyMap.
+func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap {
+ out := make(ConditionKeyMap)
+
+ for k, v := range condKeyMap {
+ out[k] = set.CopyStringSet(v)
+ }
+
+ return out
+}
+
+// mergeConditionKeyMap - returns a new ConditionKeyMap containing the merged keys/values of the two given ConditionKeyMaps.
+func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap {
+ out := CopyConditionKeyMap(condKeyMap1)
+
+ for k, v := range condKeyMap2 {
+ if ev, ok := out[k]; ok {
+ out[k] = ev.Union(v)
+ } else {
+ out[k] = set.CopyStringSet(v)
+ }
+ }
+
+ return out
+}
+
+// ConditionMap - map of condition and conditional values.
+type ConditionMap map[string]ConditionKeyMap
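+
+// A usage sketch (mirroring the unit tests in this package): to build the
+// condition {"StringEquals": {"s3:prefix": ["photos/"]}} one can write
+//
+//  ckm := make(ConditionKeyMap)
+//  ckm.Add("s3:prefix", set.CreateStringSet("photos/"))
+//  cm := make(ConditionMap)
+//  cm.Add("StringEquals", ckm)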
+
+// Add - adds a condition key and condition value. The value is merged if the key already exists.
+func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) {
+ if v, ok := cond[condKey]; ok {
+ cond[condKey] = mergeConditionKeyMap(v, condKeyMap)
+ } else {
+ cond[condKey] = CopyConditionKeyMap(condKeyMap)
+ }
+}
+
+// Remove - removes condition key and its value.
+func (cond ConditionMap) Remove(condKey string) {
+ if _, ok := cond[condKey]; ok {
+ delete(cond, condKey)
+ }
+}
+
+// mergeConditionMap - returns a new ConditionMap containing the merged keys/values of the two given ConditionMaps.
+func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap {
+ out := make(ConditionMap)
+
+ for k, v := range condMap1 {
+ out[k] = CopyConditionKeyMap(v)
+ }
+
+ for k, v := range condMap2 {
+ if ev, ok := out[k]; ok {
+ out[k] = mergeConditionKeyMap(ev, v)
+ } else {
+ out[k] = CopyConditionKeyMap(v)
+ }
+ }
+
+ return out
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go
new file mode 100644
index 000000000..419868f38
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go
@@ -0,0 +1,289 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// ConditionKeyMap.Add() is called and the result is validated.
+func TestConditionKeyMapAdd(t *testing.T) {
+ condKeyMap := make(ConditionKeyMap)
+ testCases := []struct {
+ key string
+ value set.StringSet
+ expectedResult string
+ }{
+ // Add new key and value.
+ {"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello"]}`},
+ // Add existing key and value.
+ {"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello"]}`},
+ // Add existing key with a new value.
+ {"s3:prefix", set.CreateStringSet("world"), `{"s3:prefix":["hello","world"]}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap.Add(testCase.key, testCase.value)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionKeyMap.Remove() is called and the result is validated.
+func TestConditionKeyMapRemove(t *testing.T) {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ key string
+ value set.StringSet
+ expectedResult string
+ }{
+ // Remove non-existent key and value.
+ {"s3:myprefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello","world"]}`},
+ // Remove existing key and value.
+ {"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["world"]}`},
+ // Remove the last remaining value so that the key itself is removed.
+ {"s3:prefix", set.CreateStringSet("world"), `{}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap.Remove(testCase.key, testCase.value)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionKeyMap.RemoveKey() is called and the result is validated.
+func TestConditionKeyMapRemoveKey(t *testing.T) {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ key string
+ expectedResult string
+ }{
+ // Remove non-existent key.
+ {"s3:myprefix", `{"s3:prefix":["hello","world"]}`},
+ // Remove existing key.
+ {"s3:prefix", `{}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap.RemoveKey(testCase.key)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// CopyConditionKeyMap() is called and the result is validated.
+func TestCopyConditionKeyMap(t *testing.T) {
+ emptyCondKeyMap := make(ConditionKeyMap)
+ nonEmptyCondKeyMap := make(ConditionKeyMap)
+ nonEmptyCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ condKeyMap ConditionKeyMap
+ expectedResult string
+ }{
+ // To test empty ConditionKeyMap.
+ {emptyCondKeyMap, `{}`},
+ // To test non-empty ConditionKeyMap.
+ {nonEmptyCondKeyMap, `{"s3:prefix":["hello","world"]}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap := CopyConditionKeyMap(testCase.condKeyMap)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// mergeConditionKeyMap() is called and the result is validated.
+func TestMergeConditionKeyMap(t *testing.T) {
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+
+ condKeyMap2 := make(ConditionKeyMap)
+ condKeyMap2.Add("s3:prefix", set.CreateStringSet("world"))
+
+ condKeyMap3 := make(ConditionKeyMap)
+ condKeyMap3.Add("s3:myprefix", set.CreateStringSet("world"))
+
+ testCases := []struct {
+ condKeyMap1 ConditionKeyMap
+ condKeyMap2 ConditionKeyMap
+ expectedResult string
+ }{
+ // Both arguments are empty.
+ {make(ConditionKeyMap), make(ConditionKeyMap), `{}`},
+ // First argument is empty.
+ {make(ConditionKeyMap), condKeyMap1, `{"s3:prefix":["hello"]}`},
+ // Second argument is empty.
+ {condKeyMap1, make(ConditionKeyMap), `{"s3:prefix":["hello"]}`},
+ // Both arguments are same value.
+ {condKeyMap1, condKeyMap1, `{"s3:prefix":["hello"]}`},
+ // Value of second argument will be merged.
+ {condKeyMap1, condKeyMap2, `{"s3:prefix":["hello","world"]}`},
+ // New key of second argument will be added.
+ {condKeyMap1, condKeyMap3, `{"s3:myprefix":["world"],"s3:prefix":["hello"]}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap := mergeConditionKeyMap(testCase.condKeyMap1, testCase.condKeyMap2)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionMap.Add() is called and the result is validated.
+func TestConditionMapAdd(t *testing.T) {
+ condMap := make(ConditionMap)
+
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+
+ condKeyMap2 := make(ConditionKeyMap)
+ condKeyMap2.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ key string
+ value ConditionKeyMap
+ expectedResult string
+ }{
+ // Add new key and value.
+ {"StringEquals", condKeyMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Add existing key and value.
+ {"StringEquals", condKeyMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Add existing key with a new value.
+ {"StringEquals", condKeyMap2, `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
+ }
+
+ for _, testCase := range testCases {
+ condMap.Add(testCase.key, testCase.value)
+ if data, err := json.Marshal(condMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionMap.Remove() is called and the result is validated.
+func TestConditionMapRemove(t *testing.T) {
+ condMap := make(ConditionMap)
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+ condMap.Add("StringEquals", condKeyMap)
+
+ testCases := []struct {
+ key string
+ expectedResult string
+ }{
+ // Remove non-existent key.
+ {"StringNotEquals", `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
+ // Remove existing key.
+ {"StringEquals", `{}`},
+ }
+
+ for _, testCase := range testCases {
+ condMap.Remove(testCase.key)
+ if data, err := json.Marshal(condMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// mergeConditionMap() is called and the result is validated.
+func TestMergeConditionMap(t *testing.T) {
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+ condMap1 := make(ConditionMap)
+ condMap1.Add("StringEquals", condKeyMap1)
+
+ condKeyMap2 := make(ConditionKeyMap)
+ condKeyMap2.Add("s3:prefix", set.CreateStringSet("world"))
+ condMap2 := make(ConditionMap)
+ condMap2.Add("StringEquals", condKeyMap2)
+
+ condMap3 := make(ConditionMap)
+ condMap3.Add("StringNotEquals", condKeyMap2)
+
+ testCases := []struct {
+ condMap1 ConditionMap
+ condMap2 ConditionMap
+ expectedResult string
+ }{
+ // Both arguments are empty.
+ {make(ConditionMap), make(ConditionMap), `{}`},
+ // First argument is empty.
+ {make(ConditionMap), condMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Second argument is empty.
+ {condMap1, make(ConditionMap), `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Both arguments are same value.
+ {condMap1, condMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Value of second argument will be merged.
+ {condMap1, condMap2, `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
+ // New key of second argument will be added.
+ {condMap1, condMap3, `{"StringEquals":{"s3:prefix":["hello"]},"StringNotEquals":{"s3:prefix":["world"]}}`},
+ }
+
+ for _, testCase := range testCases {
+ condMap := mergeConditionMap(testCase.condMap1, testCase.condMap2)
+ if data, err := json.Marshal(condMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
new file mode 100644
index 000000000..f618059cf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
@@ -0,0 +1,635 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// BucketPolicy - Bucket level policy.
+type BucketPolicy string
+
+// Different types of Policies currently supported for buckets.
+const (
+ BucketPolicyNone BucketPolicy = "none"
+ BucketPolicyReadOnly BucketPolicy = "readonly"
+ BucketPolicyReadWrite BucketPolicy = "readwrite"
+ BucketPolicyWriteOnly BucketPolicy = "writeonly"
+)
+
+// IsValidBucketPolicy - returns true if the provided policy value is supported.
+func (p BucketPolicy) IsValidBucketPolicy() bool {
+ switch p {
+ case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
+ return true
+ }
+ return false
+}
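+
+// Usage sketch (illustrative, not part of the upstream file): only the four
+// policy constants above pass validation; any other string is rejected.
+//
+//    BucketPolicy("readonly").IsValidBucketPolicy() // true
+//    BucketPolicy("public").IsValidBucketPolicy()   // false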
+
+// Resource prefix for all AWS resources.
+const awsResourcePrefix = "arn:aws:s3:::"
+
+// Common bucket actions for both read and write policies.
+var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation")
+
+// Read only bucket actions.
+var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket")
+
+// Write only bucket actions.
+var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads")
+
+// Read only object actions.
+var readOnlyObjectActions = set.CreateStringSet("s3:GetObject")
+
+// Write only object actions.
+var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject")
+
+// Read and write object actions.
+var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions)
+
+// All valid bucket and object actions.
+var validActions = commonBucketActions.
+ Union(readOnlyBucketActions).
+ Union(writeOnlyBucketActions).
+ Union(readOnlyObjectActions).
+ Union(writeOnlyObjectActions)
+
+var startsWithFunc = func(resource string, resourcePrefix string) bool {
+ return strings.HasPrefix(resource, resourcePrefix)
+}
+
+// User - principals a statement applies to, as AWS accounts and/or canonical users.
+type User struct {
+ AWS set.StringSet `json:"AWS,omitempty"`
+ CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"`
+}
+
+// Statement - minio policy statement
+type Statement struct {
+ Actions set.StringSet `json:"Action"`
+ Conditions ConditionMap `json:"Condition,omitempty"`
+ Effect string
+ Principal User `json:"Principal"`
+ Resources set.StringSet `json:"Resource"`
+ Sid string
+}
+
+// BucketAccessPolicy - minio policy collection
+type BucketAccessPolicy struct {
+ Version string // date in YYYY-MM-DD format
+ Statements []Statement `json:"Statement"`
+}
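+
+// Illustrative sketch (not in the upstream file): a minimal policy document
+// as it would unmarshal into BucketAccessPolicy via encoding/json; the
+// Version value "2012-10-17" is the standard policy language version date.
+//
+//    var p BucketAccessPolicy
+//    doc := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]}`
+//    _ = json.Unmarshal([]byte(doc), &p) // p.Statements holds one statement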
+
+// isValidStatement - returns whether given statement is valid to process for given bucket name.
+func isValidStatement(statement Statement, bucketName string) bool {
+ if statement.Actions.Intersection(validActions).IsEmpty() {
+ return false
+ }
+
+ if statement.Effect != "Allow" {
+ return false
+ }
+
+ if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") {
+ return false
+ }
+
+ bucketResource := awsResourcePrefix + bucketName
+ if statement.Resources.Contains(bucketResource) {
+ return true
+ }
+
+ if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() {
+ return false
+ }
+
+ return true
+}
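+
+// For illustration (not upstream code): a statement is considered valid only
+// when it allows "*" on the bucket resource itself or on objects under it.
+//
+//    s := Statement{
+//        Actions:   readOnlyObjectActions,
+//        Effect:    "Allow",
+//        Principal: User{AWS: set.CreateStringSet("*")},
+//        Resources: set.CreateStringSet(awsResourcePrefix + "mybucket/myobject"),
+//    }
+//    isValidStatement(s, "mybucket") // true; with Effect "Deny" it would be false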
+
+// Returns new statements with bucket actions for the given policy.
+func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName)
+
+ statement := Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+
+ if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ if prefix != "" {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix))
+ condMap := make(ConditionMap)
+ condMap.Add("StringEquals", condKeyMap)
+ statement.Conditions = condMap
+ }
+ statements = append(statements, statement)
+ }
+
+ if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+ }
+
+ return statements
+}
+
+// Returns new statements containing object actions for the given policy.
+func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ statement := Statement{
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"),
+ Sid: "",
+ }
+
+ if policy == BucketPolicyReadOnly {
+ statement.Actions = readOnlyObjectActions
+ } else if policy == BucketPolicyWriteOnly {
+ statement.Actions = writeOnlyObjectActions
+ } else if policy == BucketPolicyReadWrite {
+ statement.Actions = readWriteObjectActions
+ }
+
+ statements = append(statements, statement)
+ return statements
+}
+
+// Returns new statements for given policy, bucket and prefix.
+func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ ns := newBucketStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ ns = newObjectStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ return statements
+}
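+
+// Expansion sketch (mirrors the expectations in bucket-policy_test.go; not
+// part of the upstream file): a read-only policy with a prefix yields three
+// statements: GetBucketLocation on the bucket, ListBucket restricted by an
+// s3:prefix condition, and GetObject on the prefixed objects.
+//
+//    ss := newStatements(BucketPolicyReadOnly, "mybucket", "hello")
+//    // len(ss) == 3
+//    // ss[1].Conditions: {"StringEquals": {"s3:prefix": ["hello"]}}
+//    // ss[2].Resources:  {"arn:aws:s3:::mybucket/hello*"}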
+
+// Returns whether read-only and/or write-only object actions are in use by
+// statements for objects other than those under the given prefix.
+func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) {
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ for _, s := range statements {
+ if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() {
+ if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnlyInUse = true
+ }
+
+ if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnlyInUse = true
+ }
+ }
+ if readOnlyInUse && writeOnlyInUse {
+ break
+ }
+ }
+
+ return readOnlyInUse, writeOnlyInUse
+}
+
+// Removes object actions in given statement.
+func removeObjectActions(statement Statement, objectResource string) Statement {
+ if statement.Conditions == nil {
+ if len(statement.Resources) > 1 {
+ statement.Resources.Remove(objectResource)
+ } else {
+ statement.Actions = statement.Actions.Difference(readOnlyObjectActions)
+ statement.Actions = statement.Actions.Difference(writeOnlyObjectActions)
+ }
+ }
+
+ return statement
+}
+
+// Removes bucket actions for given policy in given statement.
+func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement {
+ removeReadOnly := func() {
+ if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
+ return
+ }
+
+ if statement.Conditions == nil {
+ statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
+ return
+ }
+
+ if prefix != "" {
+ stringEqualsValue := statement.Conditions["StringEquals"]
+ values := set.NewStringSet()
+ if stringEqualsValue != nil {
+ values = stringEqualsValue["s3:prefix"]
+ if values == nil {
+ values = set.NewStringSet()
+ }
+ }
+
+ values.Remove(prefix)
+
+ if stringEqualsValue != nil {
+ if values.IsEmpty() {
+ delete(stringEqualsValue, "s3:prefix")
+ }
+ if len(stringEqualsValue) == 0 {
+ delete(statement.Conditions, "StringEquals")
+ }
+ }
+
+ if len(statement.Conditions) == 0 {
+ statement.Conditions = nil
+ statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
+ }
+ }
+ }
+
+ removeWriteOnly := func() {
+ if statement.Conditions == nil {
+ statement.Actions = statement.Actions.Difference(writeOnlyBucketActions)
+ }
+ }
+
+ if len(statement.Resources) > 1 {
+ statement.Resources.Remove(bucketResource)
+ } else {
+ if !readOnlyInUse {
+ removeReadOnly()
+ }
+
+ if !writeOnlyInUse {
+ removeWriteOnly()
+ }
+ }
+
+ return statement
+}
+
+// Returns the statement list with actions and statements for the given
+// bucket name and prefix removed.
+func removeStatements(statements []Statement, bucketName string, prefix string) []Statement {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+ readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix)
+
+ out := []Statement{}
+ readOnlyBucketStatements := []Statement{}
+ s3PrefixValues := set.NewStringSet()
+
+ for _, statement := range statements {
+ if !isValidStatement(statement, bucketName) {
+ out = append(out, statement)
+ continue
+ }
+
+ if statement.Resources.Contains(bucketResource) {
+ if statement.Conditions != nil {
+ statement = removeBucketActions(statement, prefix, bucketResource, false, false)
+ } else {
+ statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse)
+ }
+ } else if statement.Resources.Contains(objectResource) {
+ statement = removeObjectActions(statement, objectResource)
+ }
+
+ if !statement.Actions.IsEmpty() {
+ if statement.Resources.Contains(bucketResource) &&
+ statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") {
+
+ if statement.Conditions != nil {
+ stringEqualsValue := statement.Conditions["StringEquals"]
+ values := set.NewStringSet()
+ if stringEqualsValue != nil {
+ values = stringEqualsValue["s3:prefix"]
+ if values == nil {
+ values = set.NewStringSet()
+ }
+ }
+ s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string {
+ return bucketResource + "/" + v + "*"
+ }))
+ } else if !s3PrefixValues.IsEmpty() {
+ readOnlyBucketStatements = append(readOnlyBucketStatements, statement)
+ continue
+ }
+ }
+ out = append(out, statement)
+ }
+ }
+
+ skipBucketStatement := true
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ for _, statement := range out {
+ if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() &&
+ s3PrefixValues.Intersection(statement.Resources).IsEmpty() {
+ skipBucketStatement = false
+ break
+ }
+ }
+
+ for _, statement := range readOnlyBucketStatements {
+ if skipBucketStatement &&
+ statement.Resources.Contains(bucketResource) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ continue
+ }
+
+ out = append(out, statement)
+ }
+
+ if len(out) == 1 {
+ statement := out[0]
+ if statement.Resources.Contains(bucketResource) &&
+ statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ out = []Statement{}
+ }
+ }
+
+ return out
+}
+
+// Appends the given statement to the statement list, keeping statements unique.
+// - If the statement already exists in the list, it is ignored.
+// - If a matching statement exists with different conditions, the conditions are merged.
+// - Otherwise the statement is appended to the list.
+func appendStatement(statements []Statement, statement Statement) []Statement {
+ for i, s := range statements {
+ if s.Actions.Equals(statement.Actions) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Equals(statement.Principal.AWS) &&
+ reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ statements[i].Resources = s.Resources.Union(statement.Resources)
+ return statements
+ } else if s.Resources.Equals(statement.Resources) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Equals(statement.Principal.AWS) &&
+ reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ statements[i].Actions = s.Actions.Union(statement.Actions)
+ return statements
+ }
+
+ if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) &&
+ s.Actions.Intersection(statement.Actions).Equals(statement.Actions) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) {
+ if reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ return statements
+ }
+ if s.Conditions != nil && statement.Conditions != nil {
+ if s.Resources.Equals(statement.Resources) {
+ statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions)
+ return statements
+ }
+ }
+ }
+ }
+
+ if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) {
+ return append(statements, statement)
+ }
+
+ return statements
+}
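+
+// Merging sketch (not upstream code): two statements that differ only in
+// resources collapse into one statement with the resources unioned.
+//
+//    a := Statement{
+//        Actions:   readOnlyObjectActions,
+//        Effect:    "Allow",
+//        Principal: User{AWS: set.CreateStringSet("*")},
+//        Resources: set.CreateStringSet(awsResourcePrefix + "mybucket/a*"),
+//    }
+//    b := a
+//    b.Resources = set.CreateStringSet(awsResourcePrefix + "mybucket/b*")
+//    out := appendStatement([]Statement{a}, b)
+//    // len(out) == 1; out[0].Resources holds both ".../a*" and ".../b*"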
+
+// Appends each statement of the second list to the first via appendStatement.
+func appendStatements(statements []Statement, toAppend []Statement) []Statement {
+ for _, s := range toAppend {
+ statements = appendStatement(statements, s)
+ }
+
+ return statements
+}
+
+// Returns policy of given bucket statement.
+func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) {
+ if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) {
+ return commonFound, readOnly, writeOnly
+ }
+
+ if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
+ statement.Conditions == nil {
+ commonFound = true
+ }
+
+ if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) &&
+ statement.Conditions == nil {
+ writeOnly = true
+ }
+
+ if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
+ if prefix != "" && statement.Conditions != nil {
+ if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok {
+ if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok {
+ if s3PrefixValues.Contains(prefix) {
+ readOnly = true
+ }
+ }
+ } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok {
+ if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok {
+ if !s3PrefixValues.Contains(prefix) {
+ readOnly = true
+ }
+ }
+ }
+ } else if prefix == "" && statement.Conditions == nil {
+ readOnly = true
+ } else if prefix != "" && statement.Conditions == nil {
+ readOnly = true
+ }
+ }
+
+ return commonFound, readOnly, writeOnly
+}
+
+// Returns policy of given object statement.
+func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
+ if statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnly = true
+ }
+ if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnly = true
+ }
+ }
+
+ return readOnly, writeOnly
+}
+
+// GetPolicy - returns the policy of the given bucket name and prefix in the given statements.
+func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ bucketCommonFound := false
+ bucketReadOnly := false
+ bucketWriteOnly := false
+ matchedResource := ""
+ objReadOnly := false
+ objWriteOnly := false
+
+ for _, s := range statements {
+ matchedObjResources := set.NewStringSet()
+ if s.Resources.Contains(objectResource) {
+ matchedObjResources.Add(objectResource)
+ } else {
+ matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource)
+ }
+
+ if !matchedObjResources.IsEmpty() {
+ readOnly, writeOnly := getObjectPolicy(s)
+ for resource := range matchedObjResources {
+ if len(matchedResource) < len(resource) {
+ objReadOnly = readOnly
+ objWriteOnly = writeOnly
+ matchedResource = resource
+ } else if len(matchedResource) == len(resource) {
+ objReadOnly = objReadOnly || readOnly
+ objWriteOnly = objWriteOnly || writeOnly
+ matchedResource = resource
+ }
+ }
+ } else if s.Resources.Contains(bucketResource) {
+ commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix)
+ bucketCommonFound = bucketCommonFound || commonFound
+ bucketReadOnly = bucketReadOnly || readOnly
+ bucketWriteOnly = bucketWriteOnly || writeOnly
+ }
+ }
+
+ policy := BucketPolicyNone
+ if bucketCommonFound {
+ if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly {
+ policy = BucketPolicyReadWrite
+ } else if bucketReadOnly && objReadOnly {
+ policy = BucketPolicyReadOnly
+ } else if bucketWriteOnly && objWriteOnly {
+ policy = BucketPolicyWriteOnly
+ }
+ }
+
+ return policy
+}
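+
+// Usage sketch (not upstream code): statements produced by SetPolicy below
+// round-trip through GetPolicy for the same bucket and prefix.
+//
+//    ss := SetPolicy(nil, BucketPolicyReadOnly, "mybucket", "hello")
+//    GetPolicy(ss, "mybucket", "hello") // BucketPolicyReadOnly
+//    GetPolicy(ss, "mybucket", "world") // BucketPolicyNone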
+
+// GetPolicies - returns a map of policy rules, keyed by object prefix, for the given bucket name in the given statements.
+func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
+ policyRules := map[string]BucketPolicy{}
+ objResources := set.NewStringSet()
+ // Collect all object resources under the given bucket.
+ for _, s := range statements {
+ for r := range s.Resources {
+ if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
+ objResources.Add(r)
+ }
+ }
+ }
+ // Treat each object resource as an actual object and fetch its policy.
+ for r := range objResources {
+ // Strip a trailing '*' and remember it so it can be re-appended to the map key.
+ asterisk := ""
+ if strings.HasSuffix(r, "*") {
+ r = r[:len(r)-1]
+ asterisk = "*"
+ }
+ objectPath := r[len(awsResourcePrefix+bucketName)+1:]
+ p := GetPolicy(statements, bucketName, objectPath)
+ policyRules[bucketName+"/"+objectPath+asterisk] = p
+ }
+ return policyRules
+}
+
+// SetPolicy - returns new statements with the policy for the given bucket
+// name and prefix appended.
+func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
+ out := removeStatements(statements, bucketName, prefix)
+ // fmt.Println("out = ")
+ // printstatement(out)
+ ns := newStatements(policy, bucketName, prefix)
+ // fmt.Println("ns = ")
+ // printstatement(ns)
+
+ rv := appendStatements(out, ns)
+ // fmt.Println("rv = ")
+ // printstatement(rv)
+
+ return rv
+}
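+
+// Removal sketch (not upstream code): setting BucketPolicyNone for the same
+// bucket and prefix strips the statements added earlier, leaving none behind.
+//
+//    ss := SetPolicy(nil, BucketPolicyReadOnly, "mybucket", "hello")
+//    ss = SetPolicy(ss, BucketPolicyNone, "mybucket", "hello")
+//    // len(ss) == 0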
+
+// resourceMatch - returns whether 'resource' matches the wildcard 'pattern'.
+func resourceMatch(pattern, resource string) bool {
+ if pattern == "" {
+ return resource == pattern
+ }
+ if pattern == "*" {
+ return true
+ }
+ parts := strings.Split(pattern, "*")
+ if len(parts) == 1 {
+ return resource == pattern
+ }
+ tGlob := strings.HasSuffix(pattern, "*")
+ end := len(parts) - 1
+ if !strings.HasPrefix(resource, parts[0]) {
+ return false
+ }
+ for i := 1; i < end; i++ {
+ if !strings.Contains(resource, parts[i]) {
+ return false
+ }
+ idx := strings.Index(resource, parts[i]) + len(parts[i])
+ resource = resource[idx:]
+ }
+ return tGlob || strings.HasSuffix(resource, parts[end])
+}
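+
+// Matching sketch (not upstream code): '*' acts as a wildcard anywhere in the
+// pattern; an empty pattern matches only an empty resource.
+//
+//    resourceMatch("arn:aws:s3:::mybucket/hello*", "arn:aws:s3:::mybucket/helloworld") // true
+//    resourceMatch("arn:aws:s3:::mybucket/hello*", "arn:aws:s3:::mybucket/world")      // false
+//    resourceMatch("*", "anything")                                                    // true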
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go
new file mode 100644
index 000000000..b1862c639
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go
@@ -0,0 +1,1822 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// isValidStatement() is called and the result is validated.
+func TestIsValidStatement(t *testing.T) {
+ testCases := []struct {
+ statement Statement
+ bucketName string
+ expectedResult bool
+ }{
+ // Empty statement and bucket name.
+ {Statement{}, "", false},
+ // Empty statement.
+ {Statement{}, "mybucket", false},
+ // Empty bucket name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false},
+ // Statement with unknown actions.
+ {Statement{
+ Actions: set.CreateStringSet("s3:ListBucketVersions"),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with unknown effect.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with nil Principal.AWS.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with unknown Principal.AWS.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with different bucket name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, "mybucket", false},
+ // Statement whose resource bucket name has the bucket name only as a prefix.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybuckettest/myobject"),
+ }, "mybucket", false},
+ // Statement with bucket name and object name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/myobject"),
+ }, "mybucket", true},
+ // Statement with condition, bucket name and object name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/myobject"),
+ }, "mybucket", true},
+ }
+
+ for _, testCase := range testCases {
+ if result := isValidStatement(testCase.statement, testCase.bucketName); result != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %t, got: %t", testCase, testCase.expectedResult, result)
+ }
+ }
+}
+
+// newStatements() is called and the result is validated.
+func TestNewStatements(t *testing.T) {
+ testCases := []struct {
+ policy BucketPolicy
+ bucketName string
+ prefix string
+ expectedResult string
+ }{
+ // BucketPolicyNone: with empty bucket name and prefix.
+ {BucketPolicyNone, "", "", `[]`},
+ // BucketPolicyNone: with bucket name and empty prefix.
+ {BucketPolicyNone, "mybucket", "", `[]`},
+ // BucketPolicyNone: with empty bucket name and non-empty prefix.
+ {BucketPolicyNone, "", "hello", `[]`},
+ // BucketPolicyNone: with bucket name and prefix.
+ {BucketPolicyNone, "mybucket", "hello", `[]`},
+ // BucketPolicyReadOnly: with empty bucket name and prefix.
+ {BucketPolicyReadOnly, "", "", `[]`},
+ // BucketPolicyReadOnly: with bucket name and empty prefix.
+ {BucketPolicyReadOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyReadOnly: with empty bucket name and non-empty prefix.
+ {BucketPolicyReadOnly, "", "hello", `[]`},
+ // BucketPolicyReadOnly: with bucket name and prefix.
+ {BucketPolicyReadOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyReadWrite: with empty bucket name and prefix.
+ {BucketPolicyReadWrite, "", "", `[]`},
+ // BucketPolicyReadWrite: with bucket name and empty prefix.
+ {BucketPolicyReadWrite, "mybucket", "", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyReadWrite: with empty bucket name and non-empty prefix.
+ {BucketPolicyReadWrite, "", "hello", `[]`},
+ // BucketPolicyReadWrite: with bucket name and prefix.
+ {BucketPolicyReadWrite, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly: with empty bucket name and prefix.
+ {BucketPolicyWriteOnly, "", "", `[]`},
+ // BucketPolicyWriteOnly: with bucket name and empty prefix.
+ {BucketPolicyWriteOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly: with empty bucket name and non-empty prefix.
+ {BucketPolicyWriteOnly, "", "hello", `[]`},
+ // BucketPolicyWriteOnly: with bucket name and prefix.
+ {BucketPolicyWriteOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := newStatements(testCase.policy, testCase.bucketName, testCase.prefix)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("Unable to marshal statements to JSON, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
+
+// getInUsePolicy() is called and the result is validated.
+func TestGetInUsePolicy(t *testing.T) {
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult1 bool
+ expectedResult2 bool
+ }{
+ // All empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", false, false},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", false, false},
+ // Non-empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", false, false},
+ // Non-empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "hello", false, false},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", false, false},
+ // Empty statements, non-empty bucket name non-empty prefix.
+ {[]Statement{}, "mybucket", "hello", false, false},
+ // Empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, "", "hello", false, false},
+ // Non-empty statements, non-empty bucket name, non-empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", false, false},
+ // Different bucket statements and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "", false, false},
+ // Different bucket statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "hello", false, false},
+ // Different bucket multi-statements and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket/world"),
+ }}, "mybucket", "", false, false},
+ // Different bucket multi-statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket/world"),
+ }}, "mybucket", "hello", false, false},
+ // Read-only in use.
+ {[]Statement{{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", true, false},
+ // Write-only in use.
+ {[]Statement{{
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", false, true},
+ // Read-write in use.
+ {[]Statement{{
+ Actions: readWriteObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", true, true},
+ // Read-write multi-statements.
+ {[]Statement{{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/ground"),
+ }}, "mybucket", "hello", true, true},
+ }
+
+ for _, testCase := range testCases {
+ result1, result2 := getInUsePolicy(testCase.statements, testCase.bucketName, testCase.prefix)
+ if !(result1 == testCase.expectedResult1 && result2 == testCase.expectedResult2) {
+ t.Fatalf("%+v: expected: [%t,%t], got: [%t,%t]", testCase,
+ testCase.expectedResult1, testCase.expectedResult2,
+ result1, result2)
+ }
+ }
+}
+
+// removeStatements() is called and the result is validated.
+func TestRemoveStatements(t *testing.T) {
+ unknownCondMap1 := make(ConditionMap)
+ unknownCondKeyMap1 := make(ConditionKeyMap)
+ unknownCondKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+ unknownCondMap1.Add("StringNotEquals", unknownCondKeyMap1)
+
+ unknownCondMap11 := make(ConditionMap)
+ unknownCondKeyMap11 := make(ConditionKeyMap)
+ unknownCondKeyMap11.Add("s3:prefix", set.CreateStringSet("hello"))
+ unknownCondMap11.Add("StringNotEquals", unknownCondKeyMap11)
+
+ unknownCondMap12 := make(ConditionMap)
+ unknownCondKeyMap12 := make(ConditionKeyMap)
+ unknownCondKeyMap12.Add("s3:prefix", set.CreateStringSet("hello"))
+ unknownCondMap12.Add("StringNotEquals", unknownCondKeyMap12)
+
+ knownCondMap1 := make(ConditionMap)
+ knownCondKeyMap1 := make(ConditionKeyMap)
+ knownCondKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap1.Add("StringEquals", knownCondKeyMap1)
+
+ knownCondMap11 := make(ConditionMap)
+ knownCondKeyMap11 := make(ConditionKeyMap)
+ knownCondKeyMap11.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap11.Add("StringEquals", knownCondKeyMap11)
+
+ knownCondMap12 := make(ConditionMap)
+ knownCondKeyMap12 := make(ConditionKeyMap)
+ knownCondKeyMap12.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap12.Add("StringEquals", knownCondKeyMap12)
+
+ knownCondMap13 := make(ConditionMap)
+ knownCondKeyMap13 := make(ConditionKeyMap)
+ knownCondKeyMap13.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap13.Add("StringEquals", knownCondKeyMap13)
+
+ knownCondMap14 := make(ConditionMap)
+ knownCondKeyMap14 := make(ConditionKeyMap)
+ knownCondKeyMap14.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap14.Add("StringEquals", knownCondKeyMap14)
+
+ knownCondMap2 := make(ConditionMap)
+ knownCondKeyMap2 := make(ConditionKeyMap)
+ knownCondKeyMap2.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+ knownCondMap2.Add("StringEquals", knownCondKeyMap2)
+
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult string
+ }{
+ // All empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", `[]`},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Non-empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Non-empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", `[]`},
+ // Empty statements, non-empty bucket name non-empty prefix.
+ {[]Statement{}, "mybucket", "hello", `[]`},
+ // Empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, "", "hello", `[]`},
+ // Statement with unknown Actions with empty prefix.
+ {[]Statement{{
+ Actions: set.CreateStringSet("s3:ListBucketVersions", "s3:ListAllMyBuckets"),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListAllMyBuckets","s3:ListBucketVersions"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Actions.
+ {[]Statement{{
+ Actions: set.CreateStringSet("s3:ListBucketVersions", "s3:ListAllMyBuckets"),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListAllMyBuckets","s3:ListBucketVersions"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Effect with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Deny","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Effect.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Deny","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.AWS with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["arn:aws:iam::AccountNumberWithoutHyphens:root"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.AWS.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["arn:aws:iam::AccountNumberWithoutHyphens:root"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.CanonicalUser with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{CanonicalUser: set.CreateStringSet("649262f44b8145cb")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"CanonicalUser":["649262f44b8145cb"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.CanonicalUser.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{CanonicalUser: set.CreateStringSet("649262f44b8145cb")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"CanonicalUser":["649262f44b8145cb"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Conditions with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Resource and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Statement with unknown Resource.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Statement with known Actions with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[]`},
+ // Statement with known Actions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[]`},
+ // Statement with known multiple Actions with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions).Union(commonBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[]`},
+ // Statement with known multiple Actions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions).Union(commonBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[]`},
+ // RemoveBucketActions with readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readWriteObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readWriteObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with known Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions contains other object prefix, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap2,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with unknown Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, unknown Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with known Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with unknown Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, unknown Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with known Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with unknown Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, unknown Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // readOnlyObjectActions - RemoveObjectActions with known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readOnlyObjectActions - RemoveObjectActions with prefix, known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[]`},
+ // readOnlyObjectActions - RemoveObjectActions with unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readOnlyObjectActions - RemoveObjectActions with prefix, unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with known condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap13,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with prefix, known condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap13,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with unknown condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with prefix, unknown condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // readWriteObjectActions - RemoveObjectActions with known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap14,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readWriteObjectActions - RemoveObjectActions with prefix, known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap13,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[]`},
+ // readWriteObjectActions - RemoveObjectActions with unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readWriteObjectActions - RemoveObjectActions with prefix, unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := removeStatements(testCase.statements, testCase.bucketName, testCase.prefix)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("unable encoding to json, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
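+
+// A hedged usage sketch, not part of the original change: it shows the call
+// shape of the package-internal removeStatements() exercised above. The
+// helper name sketchRemoveStatements and the "mybucket" argument are
+// illustrative assumptions only.
+func sketchRemoveStatements() []Statement {
+ statements := []Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}
+ // Strip every grant that applies to the whole bucket (empty prefix).
+ return removeStatements(statements, "mybucket", "")
+}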
+
+// appendStatement() is called and the result is validated.
+func TestAppendStatement(t *testing.T) {
+ condMap := make(ConditionMap)
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ condMap.Add("StringEquals", condKeyMap)
+
+ condMap1 := make(ConditionMap)
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("world"))
+ condMap1.Add("StringEquals", condKeyMap1)
+
+ unknownCondMap1 := make(ConditionMap)
+ unknownCondKeyMap1 := make(ConditionKeyMap)
+ unknownCondKeyMap1.Add("s3:prefix", set.CreateStringSet("world"))
+ unknownCondMap1.Add("StringNotEquals", unknownCondKeyMap1)
+
+ testCases := []struct {
+ statements []Statement
+ statement Statement
+ expectedResult string
+ }{
+ // Empty statements and empty new statement.
+ {[]Statement{}, Statement{}, `[]`},
+ // Non-empty statements and empty new statement.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{}, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Empty statements and non-empty new statement.
+ {[]Statement{}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Append existing statement.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Append same statement with different resource.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Append same statement with different actions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Elements of new statement contains elements in statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Elements of new statement with conditions contains elements in statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Statements with condition and new statement with condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements with condition and same resources, and new statement with condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello","world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements with unknown condition and same resources, and new statement with known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["world"]},"StringNotEquals":{"s3:prefix":["world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements without condition and new statement with condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements with condition and new statement without condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements and new statement are different.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := appendStatement(testCase.statements, testCase.statement)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("unable encoding to json, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
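+
+// A minimal sketch, assuming the helper name sketchAppendStatement is ours:
+// appendStatement() folds a new statement into an existing slice, merging
+// resources and conditions as the cases above encode.
+func sketchAppendStatement() ([]byte, error) {
+ statements := appendStatement([]Statement{}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ })
+ // Per the "Append existing statement" case above, appending the same
+ // statement again is a no-op.
+ statements = appendStatement(statements, statements[0])
+ return json.Marshal(statements)
+}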
+
+// getBucketPolicy() is called and the result is validated.
+func TestGetBucketPolicy(t *testing.T) {
+ helloCondMap := make(ConditionMap)
+ helloCondKeyMap := make(ConditionKeyMap)
+ helloCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ helloCondMap.Add("StringEquals", helloCondKeyMap)
+
+ worldCondMap := make(ConditionMap)
+ worldCondKeyMap := make(ConditionKeyMap)
+ worldCondKeyMap.Add("s3:prefix", set.CreateStringSet("world"))
+ worldCondMap.Add("StringEquals", worldCondKeyMap)
+
+ notHelloCondMap := make(ConditionMap)
+ notHelloCondMap.Add("StringNotEquals", worldCondKeyMap)
+
+ testCases := []struct {
+ statement Statement
+ prefix string
+ expectedResult1 bool
+ expectedResult2 bool
+ expectedResult3 bool
+ }{
+ // Statement with invalid Effect.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with invalid Effect with prefix.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with invalid Principal.AWS.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with invalid Principal.AWS with prefix.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+
+ // Statement with commonBucketActions.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", true, false, false},
+ // Statement with commonBucketActions.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", true, false, false},
+
+ // Statement with commonBucketActions and condition.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with commonBucketActions and condition.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with writeOnlyBucketActions.
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, true},
+ // Statement with writeOnlyBucketActions.
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, true},
+ // Statement with writeOnlyBucketActions and condition
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with writeOnlyBucketActions and condition.
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with readOnlyBucketActions.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, true, false},
+ // Statement with readOnlyBucketActions.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, true, false},
+ // Statement with readOnlyBucketActions with empty condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with empty condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with readOnlyBucketActions with matching condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with matching condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, true, false},
+
+ // Statement with readOnlyBucketActions with different condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: worldCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with different condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: worldCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+
+ // Statement with readOnlyBucketActions with StringNotEquals condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: notHelloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with StringNotEquals condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: notHelloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, true, false},
+ }
+
+ for _, testCase := range testCases {
+ commonFound, readOnly, writeOnly := getBucketPolicy(testCase.statement, testCase.prefix)
+ if !(testCase.expectedResult1 == commonFound && testCase.expectedResult2 == readOnly && testCase.expectedResult3 == writeOnly) {
+ t.Fatalf("%+v: expected: [%t,%t,%t], got: [%t,%t,%t]", testCase,
+ testCase.expectedResult1, testCase.expectedResult2, testCase.expectedResult3,
+ commonFound, readOnly, writeOnly)
+ }
+ }
+}
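+
+// A hedged sketch (the helper name is ours): getBucketPolicy() reports, for
+// one statement and a prefix, whether common, read-only and write-only
+// bucket actions are granted, in that order.
+func sketchGetBucketPolicy() (commonFound, readOnly, writeOnly bool) {
+ statement := Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }
+ // Per the unconditioned readOnlyBucketActions case above, this returns
+ // (false, true, false) for the empty prefix.
+ return getBucketPolicy(statement, "")
+}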
+
+// getObjectPolicy() is called and the result is validated.
+func TestGetObjectPolicy(t *testing.T) {
+ testCases := []struct {
+ statement Statement
+ expectedResult1 bool
+ expectedResult2 bool
+ }{
+ // Statement with invalid Effect.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, false},
+ // Statement with invalid Principal.AWS.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, false},
+ // Statement with condition.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, false},
+ // Statement with readOnlyObjectActions.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, true, false},
+ // Statement with writeOnlyObjectActions.
+ {Statement{
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, true},
+ // Statement with readOnlyObjectActions and writeOnlyObjectActions.
+ {Statement{
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, true, true},
+ }
+
+ for _, testCase := range testCases {
+ readOnly, writeOnly := getObjectPolicy(testCase.statement)
+ if !(testCase.expectedResult1 == readOnly && testCase.expectedResult2 == writeOnly) {
+ t.Fatalf("%+v: expected: [%t,%t], got: [%t,%t]", testCase,
+ testCase.expectedResult1, testCase.expectedResult2,
+ readOnly, writeOnly)
+ }
+ }
+}
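+
+// A minimal sketch (helper name ours): getObjectPolicy() classifies a single
+// object-level statement as read-only and/or write-only.
+func sketchGetObjectPolicy() (readOnly, writeOnly bool) {
+ statement := Statement{
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }
+ // Per the final case above, both flags come back true.
+ return getObjectPolicy(statement)
+}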
+
+// GetPolicies() is called and the result is validated.
+func TestListBucketPolicies(t *testing.T) {
+
+ // Condition for read objects
+ downloadCondMap := make(ConditionMap)
+ downloadCondKeyMap := make(ConditionKeyMap)
+ downloadCondKeyMap.Add("s3:prefix", set.CreateStringSet("download"))
+ downloadCondMap.Add("StringEquals", downloadCondKeyMap)
+
+ // Condition for readwrite objects
+ downloadUploadCondMap := make(ConditionMap)
+ downloadUploadCondKeyMap := make(ConditionKeyMap)
+ downloadUploadCondKeyMap.Add("s3:prefix", set.CreateStringSet("both"))
+ downloadUploadCondMap.Add("StringEquals", downloadUploadCondKeyMap)
+
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult map[string]BucketPolicy
+ }{
+ // Empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", map[string]BucketPolicy{}},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", map[string]BucketPolicy{}},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", map[string]BucketPolicy{}},
+ // Readonly object statement
+ {[]Statement{
+ {
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: downloadCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/download*"),
+ }}, "mybucket", "", map[string]BucketPolicy{"mybucket/download*": BucketPolicyReadOnly}},
+ // Write Only
+ {[]Statement{
+ {
+ Actions: commonBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/upload*"),
+ }}, "mybucket", "", map[string]BucketPolicy{"mybucket/upload*": BucketPolicyWriteOnly}},
+ // Readwrite
+ {[]Statement{
+ {
+ Actions: commonBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: downloadUploadCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: writeOnlyObjectActions.Union(readOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/both*"),
+ }}, "mybucket", "", map[string]BucketPolicy{"mybucket/both*": BucketPolicyReadWrite}},
+ }
+
+ for _, testCase := range testCases {
+ policyRules := GetPolicies(testCase.statements, testCase.bucketName)
+ if !reflect.DeepEqual(testCase.expectedResult, policyRules) {
+ t.Fatalf("%+v:\n expected: %+v, got: %+v", testCase, testCase.expectedResult, policyRules)
+ }
+ }
+}
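+
+// A hedged sketch (the helper name and the "download" prefix are assumptions):
+// GetPolicies() maps resource patterns such as "mybucket/download*" to their
+// effective BucketPolicy; feeding it statements generated by SetPolicy() is
+// one plausible round trip.
+func sketchGetPolicies() map[string]BucketPolicy {
+ statements := SetPolicy([]Statement{}, BucketPolicyReadOnly, "mybucket", "download")
+ return GetPolicies(statements, "mybucket")
+}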
+
+// GetPolicy() is called and the result is validated.
+func TestGetPolicy(t *testing.T) {
+ helloCondMap := make(ConditionMap)
+ helloCondKeyMap := make(ConditionKeyMap)
+ helloCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ helloCondMap.Add("StringEquals", helloCondKeyMap)
+
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult BucketPolicy
+ }{
+ // Empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", BucketPolicyNone},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", BucketPolicyNone},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", BucketPolicyNone},
+ // Non-matching statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Non-matching statements with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only commonBucketActions.
+ {[]Statement{{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only commonBucketActions with prefix.
+ {[]Statement{{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions with conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions with prefix and conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only writeOnlyBucketActions.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only writeOnlyBucketActions with prefix.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions and conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions and conditions with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ }
+
+ for _, testCase := range testCases {
+ policy := GetPolicy(testCase.statements, testCase.bucketName, testCase.prefix)
+ if testCase.expectedResult != policy {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, policy)
+ }
+ }
+}
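+
+// A minimal sketch (helper name ours): GetPolicy() reduces a statement slice
+// to a single BucketPolicy for one bucket/prefix pair; statements that do not
+// line up, or an empty bucket name, collapse to BucketPolicyNone as the cases
+// above show.
+func sketchGetPolicy(statements []Statement) BucketPolicy {
+ return GetPolicy(statements, "mybucket", "hello")
+}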
+
+// SetPolicy() is called and the result is validated.
+func TestSetPolicy(t *testing.T) {
+ helloCondMap := make(ConditionMap)
+ helloCondKeyMap := make(ConditionKeyMap)
+ helloCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ helloCondMap.Add("StringEquals", helloCondKeyMap)
+
+ testCases := []struct {
+ statements []Statement
+ policy BucketPolicy
+ bucketName string
+ prefix string
+ expectedResult string
+ }{
+ // BucketPolicyNone - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyNone, "", "", `[]`},
+ // BucketPolicyNone - non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyNone, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // BucketPolicyNone - empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, BucketPolicyNone, "mybucket", "", `[]`},
+ // BucketPolicyNone - empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyNone, "", "hello", `[]`},
+ // BucketPolicyReadOnly - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "", "", `[]`},
+ // BucketPolicyReadOnly - non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, BucketPolicyReadOnly, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // BucketPolicyReadOnly - empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyReadOnly - empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "", "hello", `[]`},
+ // BucketPolicyReadOnly - empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "", "", `[]`},
+ // BucketPolicyWriteOnly - non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, BucketPolicyWriteOnly, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // BucketPolicyWriteOnly - empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly - empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "", "hello", `[]`},
+ // BucketPolicyWriteOnly - empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyReadWrite - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "", "", `[]`},
+ // BucketPolicyReadWrite - non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, BucketPolicyReadWrite, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // BucketPolicyReadWrite - empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyReadWrite - empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "", "hello", `[]`},
+ // BucketPolicyReadWrite - empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // Set readonly.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // Set readonly with prefix.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // Set writeonly.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyWriteOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // Set writeonly with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyWriteOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+
+ // Set readwrite.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadWrite, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // Set readwrite with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadWrite, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := SetPolicy(testCase.statements, testCase.policy, testCase.bucketName, testCase.prefix)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("unable encoding to json, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
+
+// Validates bucket policy string.
+func TestIsValidBucketPolicy(t *testing.T) {
+ testCases := []struct {
+ inputPolicy BucketPolicy
+ expectedResult bool
+ }{
+ // valid inputs.
+ {BucketPolicy("none"), true},
+ {BucketPolicy("readonly"), true},
+ {BucketPolicy("readwrite"), true},
+ {BucketPolicy("writeonly"), true},
+ // invalid inputs.
+ {BucketPolicy("readwriteonly"), false},
+ {BucketPolicy("writeread"), false},
+ }
+
+ for i, testCase := range testCases {
+ actualResult := testCase.inputPolicy.IsValidBucketPolicy()
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected IsValidBucket policy to be '%v' for policy \"%s\", but instead found it to be '%v'", i+1, testCase.expectedResult, testCase.inputPolicy, actualResult)
+ }
+ }
+}
+
+// Tests validate Bucket policy resource matcher.
+func TestBucketPolicyResourceMatch(t *testing.T) {
+
+ // generateStatement creates a statement with the given resource.
+ generateStatement := func(resource string) Statement {
+ statement := Statement{}
+ statement.Resources = set.CreateStringSet(resource)
+ return statement
+ }
+
+ // generateResource returns a resource ARN for the given bucket and object name.
+ generateResource := func(bucketName, objectName string) string {
+ return awsResourcePrefix + bucketName + "/" + objectName
+ }
+
+ testCases := []struct {
+ resourceToMatch string
+ statement Statement
+ expectedResourceMatch bool
+ }{
+ // Test case 1-4.
+ // Policy with resource ending with bucket/* allows access to all objects inside the given bucket.
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ // Test case - 5.
+ // Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt.
+ {generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), false},
+ // Test case - 6.
+ // Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt.
+ {generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
+ // Test case - 7.
+ // Policy with resource ending with bucket/oo* allows access to all subfolders starting with "oo" inside given bucket.
+ {generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
+ // Test case - 8.
+ {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
+ // Test case - 9.
+ {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
+ // Test case - 10.
+ // Proves that the namespace is flat.
+ {generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
+ "minio-bucket"+"/*/India/*/Bihar")), true},
+ // Test case - 11.
+ // Proves that the namespace is flat.
+ {generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
+ "minio-bucket"+"/*/India/*/Bihar/*")), true},
+ }
+ for i, testCase := range testCases {
+ resources := testCase.statement.Resources.FuncMatch(resourceMatch, testCase.resourceToMatch)
+ actualResourceMatch := resources.Equals(testCase.statement.Resources)
+ if testCase.expectedResourceMatch != actualResourceMatch {
+ t.Errorf("Test %d: Expected Resource match to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResourceMatch, actualResourceMatch)
+ }
+ }
+}
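
The matcher these cases exercise, resourceMatch, is defined elsewhere in the library and is not part of this hunk. As a rough sketch of the wildcard semantics the cases above rely on — assuming '*' matches any run of characters, including '/', which is what makes the flat-namespace cases pass — a standalone matcher could look like:

package main

import (
	"fmt"
	"strings"
)

// wildcardMatch reports whether name matches pattern, where each '*'
// matches any run of characters, including '/'.
func wildcardMatch(pattern, name string) bool {
	parts := strings.Split(pattern, "*")
	if len(parts) == 1 { // no wildcard, exact match only
		return pattern == name
	}
	prefix, suffix := parts[0], parts[len(parts)-1]
	if len(name) < len(prefix)+len(suffix) ||
		!strings.HasPrefix(name, prefix) || !strings.HasSuffix(name, suffix) {
		return false
	}
	// Match the interior segments left to right.
	interior := name[len(prefix) : len(name)-len(suffix)]
	for _, mid := range parts[1 : len(parts)-1] {
		idx := strings.Index(interior, mid)
		if idx < 0 {
			return false
		}
		interior = interior[idx+len(mid):]
	}
	return true
}

func main() {
	pattern := "arn:aws:s3:::minio-bucket/oo*"
	fmt.Println(wildcardMatch(pattern, "arn:aws:s3:::minio-bucket/ootput.txt")) // true
	fmt.Println(wildcardMatch(pattern, "arn:aws:s3:::minio-bucket/output.txt")) // false
}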
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
new file mode 100644
index 000000000..55084d461
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
@@ -0,0 +1,196 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+// keys - returns StringSet keys.
+func (set StringSet) keys() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes the string from the set. It does nothing if the string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match
+// function. 'matchFn' receives a set element as its first argument and
+// 'matchString' as its second; it should return true to include the element
+// in the output set, otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// 'applyFn' receives a set element as its argument and returns the processed
+// string.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set StringSet) Equals(sset StringSet) bool {
+ // If length of set is not equal to length of given set, the
+ // set is not equal to given set.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // Lengths are equal; check that every element of this set is in the given set.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// MarshalJSON - converts to JSON data.
+func (set StringSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.keys())
+}
+
+// UnmarshalJSON - parses JSON data and creates new set with it.
+// If 'data' contains a JSON string array, the set contains each string.
+// If 'data' contains a JSON string, the set contains the string as its one element.
+// Any other JSON type results in a JSON parse error.
+func (set *StringSet) UnmarshalJSON(data []byte) error {
+ sl := []string{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(StringSet)
+ for _, s := range sl {
+ set.Add(s)
+ }
+ } else {
+ var s string
+ if err = json.Unmarshal(data, &s); err == nil {
+ *set = make(StringSet)
+ set.Add(s)
+ }
+ }
+
+ return err
+}
+
+// String - returns printable string of the set.
+func (set StringSet) String() string {
+ return fmt.Sprintf("%s", set.keys())
+}
+
+// NewStringSet - creates new string set.
+func NewStringSet() StringSet {
+ return make(StringSet)
+}
+
+// CreateStringSet - creates new string set with given string values.
+func CreateStringSet(sl ...string) StringSet {
+ set := make(StringSet)
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// CopyStringSet - returns copy of given set.
+func CopyStringSet(set StringSet) StringSet {
+ nset := NewStringSet()
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
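
A short usage sketch of the API above (import path as vendored here; the output comments assume the sorted form String() produces):

package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	readers := set.CreateStringSet("alice", "bob", "carol")
	writers := set.CreateStringSet("carol", "dave")

	// Set algebra; each operation returns a new set.
	fmt.Println(readers.Intersection(writers)) // [carol]
	fmt.Println(readers.Difference(writers))   // [alice bob]
	fmt.Println(readers.Union(writers))        // [alice bob carol dave]

	// Predicate-based filtering with FuncMatch.
	short := readers.FuncMatch(func(v, _ string) bool { return len(v) <= 3 }, "")
	fmt.Println(short) // [bob]
}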
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
new file mode 100644
index 000000000..4b74e7065
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
@@ -0,0 +1,322 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "strings"
+ "testing"
+)
+
+// NewStringSet() is called and the result is validated.
+func TestNewStringSet(t *testing.T) {
+ if ss := NewStringSet(); !ss.IsEmpty() {
+ t.Fatalf("expected: true, got: false")
+ }
+}
+
+// CreateStringSet() is called and the result is validated.
+func TestCreateStringSet(t *testing.T) {
+ ss := CreateStringSet("foo")
+ if str := ss.String(); str != `[foo]` {
+ t.Fatalf("expected: %s, got: %s", `[foo]`, str)
+ }
+}
+
+// CopyStringSet() is called and the result is validated.
+func TestCopyStringSet(t *testing.T) {
+ ss := CreateStringSet("foo")
+ sscopy := CopyStringSet(ss)
+ if !ss.Equals(sscopy) {
+ t.Fatalf("expected: %s, got: %s", ss, sscopy)
+ }
+}
+
+// StringSet.Add() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetAdd(t *testing.T) {
+ testCases := []struct {
+ value string
+ expectedResult string
+ }{
+ // Test first addition.
+ {"foo", `[foo]`},
+ // Test duplicate addition.
+ {"foo", `[foo]`},
+ // Test new addition.
+ {"bar", `[bar foo]`},
+ }
+
+ ss := NewStringSet()
+ for _, testCase := range testCases {
+ ss.Add(testCase.value)
+ if str := ss.String(); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}
+
+// StringSet.Remove() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetRemove(t *testing.T) {
+ ss := CreateStringSet("foo", "bar")
+ testCases := []struct {
+ value string
+ expectedResult string
+ }{
+ // Test removing a non-existent item.
+ {"baz", `[bar foo]`},
+ // Test remove existing item.
+ {"foo", `[bar]`},
+ // Test remove existing item again.
+ {"foo", `[bar]`},
+ // Test remove that makes the set empty.
+ {"bar", `[]`},
+ }
+
+ for _, testCase := range testCases {
+ ss.Remove(testCase.value)
+ if str := ss.String(); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}
+
+// StringSet.Contains() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetContains(t *testing.T) {
+ ss := CreateStringSet("foo")
+ testCases := []struct {
+ value string
+ expectedResult bool
+ }{
+ // Test to check non-existent item.
+ {"bar", false},
+ // Test to check existent item.
+ {"foo", true},
+ // Test to verify case sensitivity.
+ {"Foo", false},
+ }
+
+ for _, testCase := range testCases {
+ if result := ss.Contains(testCase.value); result != testCase.expectedResult {
+ t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.FuncMatch() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetFuncMatch(t *testing.T) {
+ ss := CreateStringSet("foo", "bar")
+ testCases := []struct {
+ matchFn func(string, string) bool
+ value string
+ expectedResult string
+ }{
+ // Test to check match function doing case-insensitive compare.
+ {func(setValue string, compareValue string) bool {
+ return strings.ToUpper(setValue) == strings.ToUpper(compareValue)
+ }, "Bar", `[bar]`},
+ // Test to check match function doing prefix check.
+ {func(setValue string, compareValue string) bool {
+ return strings.HasPrefix(compareValue, setValue)
+ }, "foobar", `[foo]`},
+ }
+
+ for _, testCase := range testCases {
+ s := ss.FuncMatch(testCase.matchFn, testCase.value)
+ if result := s.String(); result != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.ApplyFunc() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetApplyFunc(t *testing.T) {
+ ss := CreateStringSet("foo", "bar")
+ testCases := []struct {
+ applyFn func(string) string
+ expectedResult string
+ }{
+ // Test to apply function prepending a known string.
+ {func(setValue string) string { return "mybucket/" + setValue }, `[mybucket/bar mybucket/foo]`},
+ // Test to apply function modifying values.
+ {func(setValue string) string { return setValue[1:] }, `[ar oo]`},
+ }
+
+ for _, testCase := range testCases {
+ s := ss.ApplyFunc(testCase.applyFn)
+ if result := s.String(); result != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Equals() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetEquals(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult bool
+ }{
+ // Test equal set
+ {CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), true},
+ // Test second set with more items
+ {CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz"), false},
+ // Test second set with less items
+ {CreateStringSet("foo", "bar"), CreateStringSet("bar"), false},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Equals(testCase.set2); result != testCase.expectedResult {
+ t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Intersection() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetIntersection(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult StringSet
+ }{
+ // Test intersecting all values.
+ {CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
+ // Test intersecting all values in second set.
+ {CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
+ // Test intersecting different values in second set.
+ {CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("baz")},
+ // Test intersecting none.
+ {CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), NewStringSet()},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Intersection(testCase.set2); !result.Equals(testCase.expectedResult) {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Difference() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetDifference(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult StringSet
+ }{
+ // Test differing none.
+ {CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), NewStringSet()},
+ // Test differing in first set.
+ {CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("baz")},
+ // Test differing values in both set.
+ {CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo")},
+ // Test differing all values.
+ {CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz")},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Difference(testCase.set2); !result.Equals(testCase.expectedResult) {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Union() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetUnion(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult StringSet
+ }{
+ // Test union same values.
+ {CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
+ // Test union same values in second set.
+ {CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz")},
+ // Test union different values in both set.
+ {CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo", "baz", "bar")},
+ // Test union all different values.
+ {CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz", "poo", "bar")},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Union(testCase.set2); !result.Equals(testCase.expectedResult) {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.MarshalJSON() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetMarshalJSON(t *testing.T) {
+ testCases := []struct {
+ set StringSet
+ expectedResult string
+ }{
+ // Test set with values.
+ {CreateStringSet("foo", "bar"), `["bar","foo"]`},
+ // Test empty set.
+ {NewStringSet(), "[]"},
+ }
+
+ for _, testCase := range testCases {
+ if result, _ := testCase.set.MarshalJSON(); string(result) != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, string(result))
+ }
+ }
+}
+
+// StringSet.UnmarshalJSON() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetUnmarshalJSON(t *testing.T) {
+ testCases := []struct {
+ data []byte
+ expectedResult string
+ }{
+ // Test to convert JSON array to set.
+ {[]byte(`["bar","foo"]`), `[bar foo]`},
+ // Test to convert JSON string to set.
+ {[]byte(`"bar"`), `[bar]`},
+ // Test to convert JSON empty array to set.
+ {[]byte(`[]`), `[]`},
+ // Test to convert JSON empty string to set.
+ {[]byte(`""`), `[]`},
+ }
+
+ for _, testCase := range testCases {
+ var set StringSet
+ set.UnmarshalJSON(testCase.data)
+ if result := set.String(); result != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.String() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetString(t *testing.T) {
+ testCases := []struct {
+ set StringSet
+ expectedResult string
+ }{
+ // Test empty set.
+ {NewStringSet(), `[]`},
+ // Test set with empty value.
+ {CreateStringSet(""), `[]`},
+ // Test set with value.
+ {CreateStringSet("foo"), `[foo]`},
+ }
+
+ for _, testCase := range testCases {
+ if str := testCase.set.String(); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}
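
The custom JSON behavior exercised by the last two tests, end to end, as a small sketch (errors elided for brevity):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	s := set.CreateStringSet("foo", "bar")
	data, _ := json.Marshal(s) // keys marshal in sorted order
	fmt.Println(string(data))  // ["bar","foo"]

	var t set.StringSet
	// A bare JSON string is also accepted and becomes a one-element set.
	_ = json.Unmarshal([]byte(`"baz"`), &t)
	fmt.Println(t) // [baz]
}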
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
new file mode 100644
index 000000000..2a675d770
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/post-policy.go
@@ -0,0 +1,191 @@
+package minio
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// expirationDateFormat is the date format for the expiration key in the JSON policy.
+const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+// policyCondition {
+//     matchType: "eq",
+//     condition: "$Content-Type",
+//     value:     "image/png",
+// }
+//
+type policyCondition struct {
+ matchType string
+ condition string
+ value string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+ // Expiration date and time of the POST policy.
+ expiration time.Time
+ // Collection of different policy conditions.
+ conditions []policyCondition
+ // ContentLengthRange minimum and maximum allowable size for the
+ // uploaded content.
+ contentLengthRange struct {
+ min int64
+ max int64
+ }
+
+ // Post form data.
+ formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+ p := &PostPolicy{}
+ p.conditions = make([]policyCondition, 0)
+ p.formData = make(map[string]string)
+ return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+ if t.IsZero() {
+ return ErrInvalidArgument("No expiry time set.")
+ }
+ p.expiration = t
+ return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+ if strings.TrimSpace(key) == "" {
+ return ErrInvalidArgument("Object name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$key",
+ value: key,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = key
+ return nil
+}
+
+// SetKeyStartsWith - Sets an object name prefix that a policy based upload
+// can start with.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+ if strings.TrimSpace(keyStartsWith) == "" {
+ return ErrInvalidArgument("Object prefix is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: "$key",
+ value: keyStartsWith,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = keyStartsWith
+ return nil
+}
+
+// SetBucket - Sets the bucket into which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" {
+ return ErrInvalidArgument("Bucket name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$bucket",
+ value: bucketName,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["bucket"] = bucketName
+ return nil
+}
+
+// SetContentType - Sets content-type of the object for this policy
+// based upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+ if strings.TrimSpace(contentType) == "" {
+ return ErrInvalidArgument("No content type specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$Content-Type",
+ value: contentType,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["Content-Type"] = contentType
+ return nil
+}
+
+// SetContentLengthRange - Sets new min and max content length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+ if min > max {
+ return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
+ }
+ if min < 0 {
+ return ErrInvalidArgument("Minimum limit cannot be negative.")
+ }
+ if max < 0 {
+ return ErrInvalidArgument("Maximum limit cannot be negative.")
+ }
+ p.contentLengthRange.min = min
+ p.contentLengthRange.max = max
+ return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+ if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
+ return ErrInvalidArgument("Policy fields are empty.")
+ }
+ p.conditions = append(p.conditions, policyCond)
+ return nil
+}
+
+// String - implements the Stringer interface, printing the policy as a JSON formatted string.
+func (p PostPolicy) String() string {
+ return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides Marshalled JSON in bytes.
+func (p PostPolicy) marshalJSON() []byte {
+ expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+ conditions := []string{}
+ for _, po := range p.conditions {
+ conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+ }
+ if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+ conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+ p.contentLengthRange.min, p.contentLengthRange.max))
+ }
+ retStr := "{" + expirationStr
+ if len(conditions) > 0 {
+ retStr += `,"conditions":[` + strings.Join(conditions, ",") + "]"
+ }
+ retStr += "}"
+ return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's Marshalled json.
+func (p PostPolicy) base64() string {
+ return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
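
A sketch of how a caller might assemble a browser-upload policy with this API; bucket and object names are placeholders and error returns are left unchecked for brevity:

package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	// Each setter validates its input and records both a policy
	// condition and the matching POST form field.
	policy.SetBucket("my-bucket")                 // placeholder bucket
	policy.SetKey("photos/avatar.png")            // placeholder object name
	policy.SetContentType("image/png")
	policy.SetContentLengthRange(1, 10*1024*1024) // 1 byte to 10 MiB
	policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// String renders the JSON policy document that is then
	// base64-encoded and signed.
	fmt.Println(policy)
}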
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/request-signature-v2.go
new file mode 100644
index 000000000..b9f248253
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/request-signature-v2.go
@@ -0,0 +1,322 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
+
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(u *url.URL) (path string) {
+ // Encode URL path.
+ if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
+ hostSplits := strings.SplitN(u.Host, ".", 4)
+ // First element is the bucket name.
+ bucketName := hostSplits[0]
+ path = "/" + bucketName
+ path += u.Path
+ path = urlEncodePath(path)
+ return
+ }
+ if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
+ path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
+ path += u.Path
+ path = urlEncodePath(path)
+ return
+ }
+ path = urlEncodePath(u.Path)
+ return
+}
+
+// preSignV2 - presign the request in the following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ d := time.Now().UTC()
+ // Find epoch expires when the request will expire.
+ epochExpires := d.Unix() + expires
+
+ // Add expires header if not present.
+ if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+ req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ }
+
+ // Get presigned string to sign.
+ stringToSign := preStringifyHTTPReq(req)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Calculate signature.
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+ query := req.URL.Query()
+ // Handle specially for Google Cloud Storage.
+ if strings.Contains(req.URL.Host, ".storage.googleapis.com") {
+ query.Set("GoogleAccessId", accessKeyID)
+ } else {
+ query.Set("AWSAccessKeyId", accessKeyID)
+ }
+
+ // Fill in Expires for presigned query.
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+ // Encode query and save.
+ req.URL.RawQuery = queryEncode(query)
+
+ // Save signature finally.
+ req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+
+ // Return.
+ return &req
+}
+
+// postPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(policyBase64))
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+ return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// signV2 signs the request before Do() (AWS Signature Version 2).
+func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ d := time.Now().UTC()
+
+ // Add date if not present.
+ if date := req.Header.Get("Date"); date == "" {
+ req.Header.Set("Date", d.Format(http.TimeFormat))
+ }
+
+ // Calculate HMAC for secretAccessKey.
+ stringToSign := stringifyHTTPReq(req)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Prepare auth header.
+ authHeader := new(bytes.Buffer)
+ authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+ encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+ encoder.Write(hm.Sum(nil))
+ encoder.Close()
+
+ // Set Authorization header.
+ req.Header.Set("Authorization", authHeader.String())
+
+ return &req
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Expires + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func preStringifyHTTPReq(req http.Request) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writePreSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ isPreSign := true
+ writeCanonicalizedResource(buf, req, isPreSign)
+ return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func stringifyHTTPReq(req http.Request) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writeSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ isPreSign := false
+ writeCanonicalizedResource(buf, req, isPreSign)
+ return buf.String()
+}
+
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+ var protoHeaders []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ // All the AMZ headers should be lowercase
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-amz") {
+ protoHeaders = append(protoHeaders, lk)
+ vals[lk] = vv
+ }
+ }
+ sort.Strings(protoHeaders)
+ for _, k := range protoHeaders {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ if strings.Contains(v, "\n") {
+ // TODO: "Unfold" long headers that
+ // span multiple lines (as allowed by
+ // RFC 2616, section 4.2) by replacing
+ // the folding white-space (including
+ // new-line) by a single space.
+ buf.WriteString(v)
+ } else {
+ buf.WriteString(v)
+ }
+ }
+ buf.WriteByte('\n')
+ }
+}
+
+// The following list is already sorted and should always be, otherwise we could
+// have signature-related issues
+var resourceList = []string{
+ "acl",
+ "delete",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) {
+ // Save request URL.
+ requestURL := req.URL
+ // Get encoded URL path.
+ path := encodeURL2Path(requestURL)
+ if isPreSign {
+ // Get encoded URL path.
+ if len(requestURL.Query()) > 0 {
+ // Keep the usual queries unescaped for string to sign.
+ query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
+ path = path + "?" + query
+ }
+ buf.WriteString(path)
+ return
+ }
+ buf.WriteString(path)
+ if requestURL.RawQuery != "" {
+ var n int
+ vals, _ := url.ParseQuery(requestURL.RawQuery)
+ // Verify if any sub-resource queries are present, if yes
+ // canonicalize them.
+ for _, resource := range resourceList {
+ if vv, ok := vals[resource]; ok && len(vv) > 0 {
+ n++
+ // First element
+ switch n {
+ case 1:
+ buf.WriteByte('?')
+ // The rest
+ default:
+ buf.WriteByte('&')
+ }
+ buf.WriteString(resource)
+ // Request parameters
+ if len(vv[0]) > 0 {
+ buf.WriteByte('=')
+ buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
+ }
+ }
+ }
+ }
+}
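
Stripped of the canonicalization above, the V2 scheme is just base64(HMAC-SHA1(secret, stringToSign)). A self-contained sketch of that core computation, with made-up sample values:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	secretAccessKey := "SECRET" // placeholder credential
	// Layout per the StringToSign comment above: verb, Content-Md5,
	// Content-Type, Date, then canonicalized headers and resource.
	stringToSign := "GET\n\n\nSun, 01 Jan 2017 00:00:00 GMT\n/mybucket/myobject"

	hm := hmac.New(sha1.New, []byte(secretAccessKey))
	hm.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))

	// Authorization header form: "AWS" + " " + AWSAccessKeyId + ":" + Signature.
	fmt.Println("AWS AKIDEXAMPLE:" + signature)
}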
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2_test.go b/vendor/github.com/minio/minio-go/request-signature-v2_test.go
new file mode 100644
index 000000000..6d861fb81
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/request-signature-v2_test.go
@@ -0,0 +1,35 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "sort"
+ "testing"
+)
+
+// Tests that resourceList is kept in sorted order, as required for signing.
+func TestResourceListSorting(t *testing.T) {
+ sortedResourceList := make([]string, len(resourceList))
+ copy(sortedResourceList, resourceList)
+ sort.Strings(sortedResourceList)
+ for i := 0; i < len(resourceList); i++ {
+ if resourceList[i] != sortedResourceList[i] {
+ t.Errorf("Expected resourceList[%d] = \"%s\", resourceList is not correctly sorted.", i, sortedResourceList[i])
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/request-signature-v4.go
new file mode 100644
index 000000000..2be3808d6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/request-signature-v4.go
@@ -0,0 +1,303 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Signature and API related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+ yyyymmdd = "20060102"
+)
+
+///
+/// Excerpts from @lsegal -
+/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+///
+/// User-Agent:
+///
+/// This is ignored from signing because signing this causes
+/// problems with generating pre-signed URLs (that are executed
+/// by other agents) or when customers pass requests through
+/// proxies, which may modify the user-agent.
+///
+/// Content-Length:
+///
+/// This is ignored from signing because generating a pre-signed
+/// URL should not provide a content-length constraint,
+/// specifically when vending a S3 pre-signed PUT URL. The
+/// corollary to this is that when sending regular requests
+/// (non-pre-signed), the signature contains a checksum of the
+/// body, which implicitly validates the payload length (since
+/// changing the number of bytes would change the checksum)
+/// and therefore this header is not valuable in the signature.
+///
+/// Content-Type:
+///
+/// Signing this header causes quite a number of problems in
+/// browser environments, where browsers like to modify and
+/// normalize the content-type header in different ways. There is
+/// more information on this in https://goo.gl/2E9gyy. Avoiding
+/// this field simplifies logic and reduces the possibility of
+/// future bugs.
+///
+/// Authorization:
+///
+/// Is skipped for obvious reasons
+///
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// getSigningKey returns the HMAC seed used to calculate the final signature.
+func getSigningKey(secret, loc string, t time.Time) []byte {
+ date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
+ location := sumHMAC(date, []byte(loc))
+ service := sumHMAC(location, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getScope generates a scope string from a specific date, an AWS region,
+// and a service.
+func getScope(location string, t time.Time) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ location,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
+// getCredential generates a credential string.
+func getCredential(accessKeyID, location string, t time.Time) string {
+ scope := getScope(location, t)
+ return accessKeyID + "/" + scope
+}
+
+// getHashedPayload gets the hexadecimal value of the SHA256 hash of
+// the request payload.
+func getHashedPayload(req http.Request) string {
+ hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
+ if hashedPayload == "" {
+ // Presign does not have a payload, use S3 recommended value.
+ hashedPayload = unsignedPayload
+ }
+ return hashedPayload
+}
+
+// getCanonicalHeaders generates a list of request headers for
+// signature.
+func getCanonicalHeaders(req http.Request) string {
+ var headers []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ // Save all the headers in canonical form <header>:<value> newline
+ // separated for each header.
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+// getSignedHeaders generates all signed request headers, i.e. a
+// lexically sorted, semicolon-separated list of lowercase request
+// header names.
+func getSignedHeaders(req http.Request) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // Ignored header found continue.
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generates a canonical request in the following style.
+//
+// canonicalRequest =
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+func getCanonicalRequest(req http.Request) string {
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ urlEncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req),
+ getSignedHeaders(req),
+ getHashedPayload(req),
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 builds the string to sign from the timestamp, scope and canonical request.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t) + "\n"
+ stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// preSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Get credential string.
+ credential := getCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
+
+// postPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ // Get signing key.
+ signingkey := getSigningKey(secretAccessKey, location, t)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// signV4 signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Get credential string.
+ credential := getCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // If regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return &req
+}
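
The key-derivation chain in getSigningKey is easier to see flattened out. The sketch below reimplements sumHMAC, which stands in for the helper defined in utils.go later in this diff (HMAC-SHA256 is assumed); credentials are placeholders:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// sumHMAC mirrors the package helper: HMAC-SHA256 of data under key.
func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret, location := "SECRET", "us-east-1" // placeholder credentials
	t := time.Now().UTC()

	// The V4 derivation chain: date -> region -> service -> request.
	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	region := sumHMAC(date, []byte(location))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	// The final signature is hex(HMAC(signingKey, stringToSign)).
	fmt.Println(hex.EncodeToString(sumHMAC(signingKey, []byte("string-to-sign"))))
}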
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
new file mode 100644
index 000000000..41b70e474
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -0,0 +1,138 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 5
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// newRetryTimer creates a timer with exponentially increasing delays
+// until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ for i := 0; i < maxRetry; i++ {
+ select {
+ // Attempts start from 1.
+ case attemptCh <- i + 1:
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(i))
+ }
+ }()
+ return attemptCh
+}
+
+// isNetErrorRetryable - is network error retryable.
+func isNetErrorRetryable(err error) bool {
+ switch err.(type) {
+ case net.Error:
+ switch err.(type) {
+ case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+ return true
+ case *url.Error:
+ // For a URL error where the remote replies "connection closed",
+ // retry again.
+ if strings.Contains(err.Error(), "Connection closed by foreign host") {
+ return true
+ }
+ default:
+ if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
+ // If error is - tlsHandshakeTimeoutError, retry.
+ return true
+ } else if strings.Contains(err.Error(), "i/o timeout") {
+ // If error is - tcp timeoutError, retry.
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "InternalError": {},
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ // Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+ _, ok = retryableS3Codes[s3Code]
+ return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+ 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+ http.StatusInternalServerError: {},
+ http.StatusBadGateway: {},
+ http.StatusServiceUnavailable: {},
+ // Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+ _, ok = retryableHTTPStatusCodes[httpStatusCode]
+ return ok
+}
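
The client's request loop elsewhere ranges over the channel returned by newRetryTimer. A standalone sketch of the same capped, fully jittered backoff computation (illustrative parameter values, not the client's actual configuration):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	const (
		maxRetry = 5
		unit     = 200 * time.Millisecond // base delay (assumed value)
		cap      = 5 * time.Second        // upper bound on a single delay
		jitter   = 1.0                    // MaxJitter: full randomization
	)

	for attempt := 0; attempt < maxRetry; attempt++ {
		// sleep = base * 2^attempt, capped, then randomized downward.
		sleep := unit * time.Duration(1<<uint(attempt))
		if sleep > cap {
			sleep = cap
		}
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
		fmt.Printf("attempt %d: backing off %v\n", attempt+1, sleep)
		// time.Sleep(sleep) // ...then retry the request here.
	}
}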
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
new file mode 100644
index 000000000..3f159bd9d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -0,0 +1,44 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// awsS3EndpointMap Amazon S3 endpoint map.
+// "cn-north-1" adds support for AWS China.
+var awsS3EndpointMap = map[string]string{
+ "us-east-1": "s3.amazonaws.com",
+ "us-west-2": "s3-us-west-2.amazonaws.com",
+ "us-west-1": "s3-us-west-1.amazonaws.com",
+ "eu-west-1": "s3-eu-west-1.amazonaws.com",
+ "eu-central-1": "s3-eu-central-1.amazonaws.com",
+ "ap-south-1": "s3-ap-south-1.amazonaws.com",
+ "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
+ "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
+ "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
+ "sa-east-1": "s3-sa-east-1.amazonaws.com",
+ "cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
+}
+
+// getS3Endpoint gets the Amazon S3 endpoint based on the bucket location.
+func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
+ s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
+ if !ok {
+ // Default to 's3.amazonaws.com' endpoint.
+ s3Endpoint = "s3.amazonaws.com"
+ }
+ return s3Endpoint
+}
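
In-package usage is a plain lookup with a fallback; a hypothetical snippet (region name made up, and "fmt" assumed imported):

// Hypothetical in-package helper demonstrating the fallback.
func exampleEndpointLookup() {
	fmt.Println(getS3Endpoint("eu-central-1")) // s3-eu-central-1.amazonaws.com
	fmt.Println(getS3Endpoint("mars-north-1")) // unknown region: falls back to s3.amazonaws.com
}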
diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go
new file mode 100644
index 000000000..cae74cd01
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/signature-type.go
@@ -0,0 +1,37 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// SignatureType is the type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is Latest, i.e. SignatureV4.
+const (
+ Latest SignatureType = iota
+ SignatureV4
+ SignatureV2
+)
+
+// isV2 - is signature SignatureV2?
+func (s SignatureType) isV2() bool {
+ return s == SignatureV2
+}
+
+// isV4 - is signature SignatureV4?
+func (s SignatureType) isV4() bool {
+ return s == SignatureV4 || s == Latest
+}
diff --git a/vendor/github.com/minio/minio-go/tempfile.go b/vendor/github.com/minio/minio-go/tempfile.go
new file mode 100644
index 000000000..65c7b0da1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/tempfile.go
@@ -0,0 +1,60 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io/ioutil"
+ "os"
+ "sync"
+)
+
+// tempFile - temporary file container.
+type tempFile struct {
+ *os.File
+ mutex *sync.Mutex
+}
+
+// newTempFile returns a new temporary file; once closed, it automatically deletes itself.
+func newTempFile(prefix string) (*tempFile, error) {
+ // use platform specific temp directory.
+ file, err := ioutil.TempFile(os.TempDir(), prefix)
+ if err != nil {
+ return nil, err
+ }
+ return &tempFile{
+ File: file,
+ mutex: &sync.Mutex{},
+ }, nil
+}
+
+// Close - closer wrapper to close and remove temporary file.
+func (t *tempFile) Close() error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ if t.File != nil {
+ // Close the file.
+ if err := t.File.Close(); err != nil {
+ return err
+ }
+ // Remove file.
+ if err := os.Remove(t.File.Name()); err != nil {
+ return err
+ }
+ t.File = nil
+ }
+ return nil
+}
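
Both the type and constructor are unexported, so only code inside the package uses them; a hypothetical in-package caller:

// stageUpload is a hypothetical in-package helper: the temporary file
// cleans itself up when Close is called, even on early return.
func stageUpload(data []byte) error {
	tmp, err := newTempFile("example-") // prefix is illustrative
	if err != nil {
		return err
	}
	defer tmp.Close() // closes and deletes the file

	if _, err = tmp.Write(data); err != nil {
		return err
	}
	// Rewind so the staged data can be read back from the start.
	_, err = tmp.Seek(0, 0)
	return err
}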
diff --git a/vendor/github.com/minio/minio-go/test-utils_test.go b/vendor/github.com/minio/minio-go/test-utils_test.go
new file mode 100644
index 000000000..179c28a23
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/test-utils_test.go
@@ -0,0 +1,64 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/xml"
+ "io/ioutil"
+ "net/http"
+)
+
+// Contains commonly used utilities for tests.
+
+// APIError is used for mocking an error response from the server.
+type APIError struct {
+ Code string
+ Description string
+ HTTPStatusCode int
+}
+
+// generateErrorResponse mocks an XML error response from the server.
+func generateErrorResponse(resp *http.Response, APIErr APIError, bucketName string) *http.Response {
+ // generate error response.
+ errorResponse := getAPIErrorResponse(APIErr, bucketName)
+ encodedErrorResponse := encodeResponse(errorResponse)
+ // write Header.
+ resp.StatusCode = APIErr.HTTPStatusCode
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(encodedErrorResponse))
+
+ return resp
+}
+
+// getAPIErrorResponse takes a standard error and bucket name and
+// returns a populated, encodable error response value.
+func getAPIErrorResponse(err APIError, bucketName string) ErrorResponse {
+ var errResp = ErrorResponse{}
+ errResp.Code = err.Code
+ errResp.Message = err.Description
+ errResp.BucketName = bucketName
+ return errResp
+}
+
+// encodeResponse encodes the response value into XML format.
+func encodeResponse(response interface{}) []byte {
+ var bytesBuffer bytes.Buffer
+ bytesBuffer.WriteString(xml.Header)
+ encode := xml.NewEncoder(&bytesBuffer)
+ encode.Encode(response)
+ return bytesBuffer.Bytes()
+}
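
In practice these helpers combine to fabricate a server-side failure for unit tests: build a mock `*http.Response` carrying canned S3-style XML, then decode its body with `xmlDecoder` from utils.go. A sketch under those assumptions (the test name and expected values are illustrative; `ErrorResponse` field names come from this change's error-response code):

```go
package minio

import (
	"net/http"
	"testing"
)

func TestMockedErrorResponseSketch(t *testing.T) {
	apiErr := APIError{
		Code:           "NoSuchBucket",
		Description:    "The specified bucket does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	}
	resp := generateErrorResponse(&http.Response{}, apiErr, "my-bucket")

	// Decode the mocked XML body back and verify the round trip.
	var errResp ErrorResponse
	if err := xmlDecoder(resp.Body, &errResp); err != nil {
		t.Fatal(err)
	}
	if errResp.Code != "NoSuchBucket" || errResp.BucketName != "my-bucket" {
		t.Errorf("unexpected decoded error: %+v", errResp)
	}
}
```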
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
new file mode 100644
index 000000000..2208d3603
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -0,0 +1,383 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// xmlDecoder decodes an XML body into the provided value.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// sum256 calculates the SHA256 sum of an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumMD5 calculates the MD5 sum of an input byte array.
+func sumMD5(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculates the HMAC-SHA256 of data using the given key.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getEndpointURL - constructs a new endpoint URL.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+ if strings.Contains(endpoint, ":") {
+ host, _, err := net.SplitHostPort(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ if !isValidIP(host) && !isValidDomain(host) {
+ msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+ return nil, ErrInvalidArgument(msg)
+ }
+ } else {
+ if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+ msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+ return nil, ErrInvalidArgument(msg)
+ }
+ }
+ // If secure is false, use 'http' scheme.
+ scheme := "https"
+ if !secure {
+ scheme = "http"
+ }
+
+ // Construct the endpoint URL with the chosen scheme.
+ endpointURLStr := scheme + "://" + endpoint
+ endpointURL, err := url.Parse(endpointURLStr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate incoming endpoint URL.
+ if err := isValidEndpointURL(endpointURL.String()); err != nil {
+ return nil, err
+ }
+ return endpointURL, nil
+}
+
+// isValidDomain validates if input string is a valid domain name.
+func isValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // Reject a (non-exhaustive) set of invalid special characters.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+ return false
+ }
+ // No need for a full regexp match, since the list above is non-exhaustive.
+ // We treat the name as valid here and let the request fail later.
+ return true
+}
+
+// isValidIP checks whether the input string is a valid IP address.
+func isValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// closeResponse closes a non-nil response and its Body; a
+// convenient wrapper to drain any remaining data on the response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+ // Callers should close resp.Body when done reading from it.
+ // If resp.Body is not closed, the Client's underlying RoundTripper
+ // (typically Transport) may not be able to re-use a persistent TCP
+ // connection to the server for a subsequent "keep-alive" request.
+ if resp != nil && resp.Body != nil {
+ // Drain any remaining Body and then close the connection.
+ // Without this, the underlying connection cannot be re-used
+ // for future requests.
+ // - http://stackoverflow.com/a/17961593/4465767
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }
+}
+
+// isVirtualHostSupported - verifies if bucketName can be part of a
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// support this.
+func isVirtualHostSupported(endpointURL string, bucketName string) bool {
+ url, err := url.Parse(endpointURL)
+ if err != nil {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if url.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
+}
+
+// Match if it is exactly the Amazon S3 endpoint.
+func isAmazonEndpoint(endpointURL string) bool {
+ if isAmazonChinaEndpoint(endpointURL) {
+ return true
+ }
+ url, err := url.Parse(endpointURL)
+ if err != nil {
+ return false
+ }
+ if url.Host == "s3.amazonaws.com" {
+ return true
+ }
+ return false
+}
+
+// Match if it is exactly the Amazon S3 China endpoint.
+// Customers who wish to use the new Beijing Region are required
+// to sign up for a separate set of account credentials unique to
+// the China (Beijing) Region. Customers with existing AWS credentials
+// will not be able to access resources in the new Region, and vice versa.
+// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
+func isAmazonChinaEndpoint(endpointURL string) bool {
+ if endpointURL == "" {
+ return false
+ }
+ url, err := url.Parse(endpointURL)
+ if err != nil {
+ return false
+ }
+ if url.Host == "s3.cn-north-1.amazonaws.com.cn" {
+ return true
+ }
+ return false
+}
+
+// Match if it is exactly the Google Cloud Storage endpoint.
+func isGoogleEndpoint(endpointURL string) bool {
+ if endpointURL == "" {
+ return false
+ }
+ url, err := url.Parse(endpointURL)
+ if err != nil {
+ return false
+ }
+ if url.Host == "storage.googleapis.com" {
+ return true
+ }
+ return false
+}
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL string) error {
+ if endpointURL == "" {
+ return ErrInvalidArgument("Endpoint url cannot be empty.")
+ }
+ url, err := url.Parse(endpointURL)
+ if err != nil {
+ return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+ }
+ if url.Path != "/" && url.Path != "" {
+ return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+ }
+ if strings.Contains(endpointURL, ".amazonaws.com") {
+ if !isAmazonEndpoint(endpointURL) {
+ return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+ }
+ }
+ if strings.Contains(endpointURL, ".googleapis.com") {
+ if !isGoogleEndpoint(endpointURL) {
+ return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+ }
+ }
+ return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+ expireSeconds := int64(expires / time.Second)
+ if expireSeconds < 1 {
+ return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
+ }
+ if expireSeconds > 604800 {
+ return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+ }
+ return nil
+}
+
+// We support '.' in bucket names, but we fall back to using
+// path-style requests for such buckets.
+var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+
+// isValidBucketName - verify bucket name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func isValidBucketName(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" {
+ return ErrInvalidBucketName("Bucket name cannot be empty.")
+ }
+ if len(bucketName) < 3 {
+ return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
+ }
+ if len(bucketName) > 63 {
+ return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
+ }
+ if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
+ return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
+ }
+ if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
+ return ErrInvalidBucketName("Bucket name cannot have successive periods.")
+ }
+ if !validBucketName.MatchString(bucketName) {
+ return ErrInvalidBucketName("Bucket name contains invalid characters.")
+ }
+ return nil
+}
+
+// isValidObjectName - verify object name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func isValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return ErrInvalidObjectName("Object name cannot be empty.")
+ }
+ if len(objectName) > 1024 {
+ return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
+ }
+ if !utf8.ValidString(objectName) {
+ return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
+ }
+ return nil
+}
+
+// isValidObjectPrefix - verify if object prefix is valid.
+func isValidObjectPrefix(objectPrefix string) error {
+ if len(objectPrefix) > 1024 {
+ return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
+ }
+ if !utf8.ValidString(objectPrefix) {
+ return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
+ }
+ return nil
+}
+
+// percentEncodeSlash expects ASCII-encoded strings - i.e. the output of urlEncodePath.
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
+
+// queryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by urlEncodePath() used
+// here, it also percent encodes '/' (forward slash).
+func queryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(urlEncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(urlEncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// urlEncodePath encodes a string from its UTF-8 byte representation into
+// percent-encoded hex escape sequences.
+//
+// This is necessary since the standard url.Parse() and url.Values.Encode()
+// functions do not support every UTF-8 character; non-English characters
+// cannot be encoded reliably with them.
+//
+// This function, on the other hand, is a direct replacement for that
+// technique and supports pretty much every UTF-8 character.
+func urlEncodePath(pathName string) string {
+ // If the path contains only unreserved characters, no encoding is needed.
+ reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+ if reservedNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ runeLen := utf8.RuneLen(s) // avoid shadowing the builtin len
+ if runeLen < 0 {
+ // if utf8 cannot convert, return the same string as is
+ return pathName
+ }
+ u := make([]byte, runeLen)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
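
Taken together, these helpers replace net/url's escaping for signing purposes: urlEncodePath percent-encodes every byte outside the unreserved set (including multi-byte UTF-8 runes), and queryEncode sorts keys and additionally encodes '/' in values. A small in-package sketch whose expected outputs match the test cases in utils_test.go below (`demoEncodingHelpers` is a hypothetical helper):

```go
package minio

import (
	"fmt"
	"net/url"
)

func demoEncodingHelpers() {
	// Unreserved characters (and '/') pass through untouched.
	fmt.Println(urlEncodePath("url-path_ok~/segment")) // url-path_ok~/segment

	// Everything else is percent-encoded, including full UTF-8 runes.
	fmt.Println(urlEncodePath("space in url")) // space%20in%20url
	fmt.Println(urlEncodePath("本語"))           // %E6%9C%AC%E8%AA%9E

	// queryEncode additionally percent-encodes '/' in values.
	v := url.Values{}
	v.Add("@prefix", "a/b/c/")
	fmt.Println(queryEncode(v)) // %40prefix=a%2Fb%2Fc%2F
}
```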
diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go
new file mode 100644
index 000000000..1a30d5441
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/utils_test.go
@@ -0,0 +1,436 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package minio
+
+import (
+ "fmt"
+ "net/url"
+ "testing"
+ "time"
+)
+
+// Tests for 'getEndpointURL(endpoint string, secure bool)'.
+func TestGetEndpointURL(t *testing.T) {
+ testCases := []struct {
+ // Inputs.
+ endPoint string
+ secure bool
+
+ // Expected result.
+ result string
+ err error
+ // Flag indicating whether the test is expected to pass or not.
+ shouldPass bool
+ }{
+ {"s3.amazonaws.com", true, "https://s3.amazonaws.com", nil, true},
+ {"s3.cn-north-1.amazonaws.com.cn", true, "https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"s3.amazonaws.com", false, "http://s3.amazonaws.com", nil, true},
+ {"s3.cn-north-1.amazonaws.com.cn", false, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true},
+ {"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true},
+ {"192.168.1.1::9000", false, "", fmt.Errorf("too many colons in address %s", "192.168.1.1::9000"), false},
+ {"13333.123123.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
+ {"13333.123123.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
+ {"s3.amazonaws.com:443", true, "", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
+ {"storage.googleapis.com:4000", true, "", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
+ {"s3.aamzza.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-"), false},
+ {"", true, "", fmt.Errorf("Endpoint: does not follow ip address or domain name standards."), false},
+ }
+
+ for i, testCase := range testCases {
+ result, err := getEndpointURL(testCase.endPoint, testCase.secure)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ if testCase.result != result.String() {
+ t.Errorf("Test %d: Expected the result Url to be \"%s\", but found \"%s\" instead", i+1, testCase.result, result.String())
+ }
+ }
+ }
+}
+
+// Tests for 'isValidDomain(host string) bool'.
+func TestIsValidDomain(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ host string
+ // Expected result.
+ result bool
+ }{
+ {"s3.amazonaws.com", true},
+ {"s3.cn-north-1.amazonaws.com.cn", true},
+ {"s3.amazonaws.com_", false},
+ {"%$$$", false},
+ {"s3.amz.test.com", true},
+ {"s3.%%", false},
+ {"localhost", true},
+ {"-localhost", false},
+ {"", false},
+ {"\n \t", false},
+ {" ", false},
+ }
+
+ for i, testCase := range testCases {
+ result := isValidDomain(testCase.host)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate the endpoint validator.
+func TestIsValidEndpointURL(t *testing.T) {
+ testCases := []struct {
+ url string
+ err error
+ // Flag indicating whether the test is expected to pass or not.
+ shouldPass bool
+ }{
+ {"", fmt.Errorf("Endpoint url cannot be empty."), false},
+ {"/", nil, true},
+ {"https://s3.am1;4205;0cazonaws.com", nil, true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"https://s3.amazonaws.com/", nil, true},
+ {"https://storage.googleapis.com/", nil, true},
+ {"192.168.1.1", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+ {"https://amazon.googleapis.com/", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
+ {"https://storage.googleapis.com/bucket/", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+ {"https://z3.amazonaws.com", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
+ {"https://s3.amazonaws.com/bucket/object", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+ }
+
+ for i, testCase := range testCases {
+ err := isValidEndpointURL(testCase.url)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+}
+
+// Tests validate IP address validator.
+func TestIsValidIP(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ ip string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", true},
+ {"192.168.1", false},
+ {"192.168.1.1.1", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ }
+
+ for i, testCase := range testCases {
+ result := isValidIP(testCase.ip)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
+ }
+ }
+
+}
+
+// Tests validate virtual host validator.
+func TestIsVirtualHostSupported(t *testing.T) {
+ testCases := []struct {
+ url string
+ bucket string
+ // Expected result.
+ result bool
+ }{
+ {"https://s3.amazonaws.com", "my-bucket", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
+ {"https://s3.amazonaws.com", "my-bucket.", false},
+ {"https://amazons3.amazonaws.com", "my-bucket.", false},
+ {"https://storage.googleapis.com/", "my-bucket", true},
+ {"https://mystorage.googleapis.com/", "my-bucket", false},
+ }
+
+ for i, testCase := range testCases {
+ result := isVirtualHostSupported(testCase.url, testCase.bucket)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
+ }
+ }
+}
+
+// Tests validate Amazon endpoint validator.
+func TestIsAmazonEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"https://s3.amazonaws.com", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ result := isAmazonEndpoint(testCase.url)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Amazon S3 China endpoint validator.
+func TestIsAmazonChinaEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // s3.amazonaws.com is not a valid Amazon S3 China end point.
+ {"https://s3.amazonaws.com", false},
+ // valid input.
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ result := isAmazonChinaEndpoint(testCase.url)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate the Google Cloud Storage endpoint validator.
+func TestIsGoogleEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", false},
+ {"https://192.168.1.1", false},
+ {"s3.amazonaws.com", false},
+ {"http://s3.amazonaws.com", false},
+ {"https://s3.amazonaws.com", false},
+ {"https://s3.cn-north-1.amazonaws.com.cn", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"http://storage.googleapis.com", true},
+ {"https://storage.googleapis.com", true},
+ }
+
+ for i, testCase := range testCases {
+ result := isGoogleEndpoint(testCase.url)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate the expiry time validator.
+func TestIsValidExpiry(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ duration time.Duration
+ // Expected result.
+ err error
+ // Flag to indicate whether the test should pass.
+ shouldPass bool
+ }{
+ {100 * time.Millisecond, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
+ {604801 * time.Second, fmt.Errorf("Expires cannot be greater than 7 days."), false},
+ {0 * time.Second, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
+ {1 * time.Second, nil, true},
+ {10000 * time.Second, nil, true},
+ {999 * time.Second, nil, true},
+ }
+
+ for i, testCase := range testCases {
+ err := isValidExpiry(testCase.duration)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+}
+
+// Tests validate the bucket name validator.
+func TestIsValidBucketName(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ bucketName string
+ // Expected result.
+ err error
+ // Flag to indicate whether test should Pass.
+ shouldPass bool
+ }{
+ {".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters."), false},
+ {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
+ {"", ErrInvalidBucketName("Bucket name cannot be empty."), false},
+ {"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
+ {"my.bucket.com", nil, true},
+ {"my-bucket", nil, true},
+ {"123my-bucket", nil, true},
+ }
+
+ for i, testCase := range testCases {
+ err := isValidBucketName(testCase.bucketName)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+
+}
+
+func TestPercentEncodeSlash(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {"test123", "test123"},
+ {"abc,+_1", "abc,+_1"},
+ {"%40prefix=test%40123", "%40prefix=test%40123"},
+ {"key1=val1/val2", "key1=val1%2Fval2"},
+ {"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
+ }
+
+ for i, testCase := range testCases {
+ receivedOutput := percentEncodeSlash(testCase.input)
+ if testCase.output != receivedOutput {
+ t.Errorf(
+ "Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
+ i+1, testCase.input, testCase.output,
+ receivedOutput,
+ )
+
+ }
+ }
+}
+
+// Tests validate the query encoder.
+func TestQueryEncode(t *testing.T) {
+ testCases := []struct {
+ queryKey string
+ valueToEncode []string
+ // Expected result.
+ result string
+ }{
+ {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
+ {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
+ {"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
+ {"prefix", []string{"test#123"}, "prefix=test%23123"},
+ {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
+ {"prefix", []string{"test123"}, "prefix=test123"},
+ {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
+ }
+
+ for i, testCase := range testCases {
+ urlValues := make(url.Values)
+ for _, valueToEncode := range testCase.valueToEncode {
+ urlValues.Add(testCase.queryKey, valueToEncode)
+ }
+ result := queryEncode(urlValues)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate the URL path encoder.
+func TestUrlEncodePath(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ inputStr string
+ // Expected result.
+ result string
+ }{
+ {"thisisthe%url", "thisisthe%25url"},
+ {"本語", "%E6%9C%AC%E8%AA%9E"},
+ {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
+ {">123", "%3E123"},
+ {"myurl#link", "myurl%23link"},
+ {"space in url", "space%20in%20url"},
+ {"url+path", "url%2Bpath"},
+ }
+
+ for i, testCase := range testCases {
+ result := urlEncodePath(testCase.inputStr)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
diff --git a/vendor/github.com/vaughan0/go-ini/LICENSE b/vendor/github.com/vaughan0/go-ini/LICENSE
deleted file mode 100644
index 968b45384..000000000
--- a/vendor/github.com/vaughan0/go-ini/LICENSE
+++ /dev/null
@@ -1,14 +0,0 @@
-Copyright (c) 2013 Vaughan Newton
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
-persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
-Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/vaughan0/go-ini/README.md b/vendor/github.com/vaughan0/go-ini/README.md
deleted file mode 100644
index d5cd4e74b..000000000
--- a/vendor/github.com/vaughan0/go-ini/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-go-ini
-======
-
-INI parsing library for Go (golang).
-
-View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
-
-Usage
------
-
-Parse an INI file:
-
-```go
-import "github.com/vaughan0/go-ini"
-
-file, err := ini.LoadFile("myfile.ini")
-```
-
-Get data from the parsed file:
-
-```go
-name, ok := file.Get("person", "name")
-if !ok {
- panic("'name' variable missing from 'person' section")
-}
-```
-
-Iterate through values in a section:
-
-```go
-for key, value := range file["mysection"] {
- fmt.Printf("%s => %s\n", key, value)
-}
-```
-
-Iterate through sections in a file:
-
-```go
-for name, section := range file {
- fmt.Printf("Section name: %s\n", name)
-}
-```
-
-File Format
------------
-
-INI files are parsed by go-ini line-by-line. Each line may be one of the following:
-
- * A section definition: [section-name]
- * A property: key = value
- * A comment: #blahblah _or_ ;blahblah
- * Blank. The line will be ignored.
-
-Properties defined before any section headers are placed in the default section, which has
-the empty string as its key.
-
-Example:
-
-```ini
-# I am a comment
-; So am I!
-
-[apples]
-colour = red or green
-shape = applish
-
-[oranges]
-shape = square
-colour = blue
-```
diff --git a/vendor/github.com/vaughan0/go-ini/ini.go b/vendor/github.com/vaughan0/go-ini/ini.go
deleted file mode 100644
index 81aeb32f8..000000000
--- a/vendor/github.com/vaughan0/go-ini/ini.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Package ini provides functions for parsing INI configuration files.
-package ini
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
- assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
-)
-
-// ErrSyntax is returned when there is a syntax error in an INI file.
-type ErrSyntax struct {
- Line int
- Source string // The contents of the erroneous line, without leading or trailing whitespace
-}
-
-func (e ErrSyntax) Error() string {
- return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
-}
-
-// A File represents a parsed INI file.
-type File map[string]Section
-
-// A Section represents a single section of an INI file.
-type Section map[string]string
-
-// Returns a named Section. A Section will be created if one does not already exist for the given name.
-func (f File) Section(name string) Section {
- section := f[name]
- if section == nil {
- section = make(Section)
- f[name] = section
- }
- return section
-}
-
-// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
-func (f File) Get(section, key string) (value string, ok bool) {
- if s := f[section]; s != nil {
- value, ok = s[key]
- }
- return
-}
-
-// Loads INI data from a reader and stores the data in the File.
-func (f File) Load(in io.Reader) (err error) {
- bufin, ok := in.(*bufio.Reader)
- if !ok {
- bufin = bufio.NewReader(in)
- }
- return parseFile(bufin, f)
-}
-
-// Loads INI data from a named file and stores the data in the File.
-func (f File) LoadFile(file string) (err error) {
- in, err := os.Open(file)
- if err != nil {
- return
- }
- defer in.Close()
- return f.Load(in)
-}
-
-func parseFile(in *bufio.Reader, file File) (err error) {
- section := ""
- lineNum := 0
- for done := false; !done; {
- var line string
- if line, err = in.ReadString('\n'); err != nil {
- if err == io.EOF {
- done = true
- } else {
- return
- }
- }
- lineNum++
- line = strings.TrimSpace(line)
- if len(line) == 0 {
- // Skip blank lines
- continue
- }
- if line[0] == ';' || line[0] == '#' {
- // Skip comments
- continue
- }
-
- if groups := assignRegex.FindStringSubmatch(line); groups != nil {
- key, val := groups[1], groups[2]
- key, val = strings.TrimSpace(key), strings.TrimSpace(val)
- file.Section(section)[key] = val
- } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
- name := strings.TrimSpace(groups[1])
- section = name
- // Create the section if it does not exist
- file.Section(section)
- } else {
- return ErrSyntax{lineNum, line}
- }
-
- }
- return nil
-}
-
-// Loads and returns a File from a reader.
-func Load(in io.Reader) (File, error) {
- file := make(File)
- err := file.Load(in)
- return file, err
-}
-
-// Loads and returns an INI File from a file on disk.
-func LoadFile(filename string) (File, error) {
- file := make(File)
- err := file.LoadFile(filename)
- return file, err
-}
diff --git a/vendor/github.com/vaughan0/go-ini/ini_linux_test.go b/vendor/github.com/vaughan0/go-ini/ini_linux_test.go
deleted file mode 100644
index 38a6f0004..000000000
--- a/vendor/github.com/vaughan0/go-ini/ini_linux_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package ini
-
-import (
- "reflect"
- "syscall"
- "testing"
-)
-
-func TestLoadFile(t *testing.T) {
- originalOpenFiles := numFilesOpen(t)
-
- file, err := LoadFile("test.ini")
- if err != nil {
- t.Fatal(err)
- }
-
- if originalOpenFiles != numFilesOpen(t) {
- t.Error("test.ini not closed")
- }
-
- if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
- t.Error("file not read correctly")
- }
-}
-
-func numFilesOpen(t *testing.T) (num uint64) {
- var rlimit syscall.Rlimit
- err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
- if err != nil {
- t.Fatal(err)
- }
- maxFds := int(rlimit.Cur)
-
- var stat syscall.Stat_t
- for i := 0; i < maxFds; i++ {
- if syscall.Fstat(i, &stat) == nil {
- num++
- } else {
- return
- }
- }
- return
-}
diff --git a/vendor/github.com/vaughan0/go-ini/ini_test.go b/vendor/github.com/vaughan0/go-ini/ini_test.go
deleted file mode 100644
index 06a4d05ea..000000000
--- a/vendor/github.com/vaughan0/go-ini/ini_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package ini
-
-import (
- "reflect"
- "strings"
- "testing"
-)
-
-func TestLoad(t *testing.T) {
- src := `
- # Comments are ignored
-
- herp = derp
-
- [foo]
- hello=world
- whitespace should = not matter
- ; sneaky semicolon-style comment
- multiple = equals = signs
-
- [bar]
- this = that`
-
- file, err := Load(strings.NewReader(src))
- if err != nil {
- t.Fatal(err)
- }
- check := func(section, key, expect string) {
- if value, _ := file.Get(section, key); value != expect {
- t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
- }
- }
-
- check("", "herp", "derp")
- check("foo", "hello", "world")
- check("foo", "whitespace should", "not matter")
- check("foo", "multiple", "equals = signs")
- check("bar", "this", "that")
-}
-
-func TestSyntaxError(t *testing.T) {
- src := `
- # Line 2
- [foo]
- bar = baz
- # Here's an error on line 6:
- wut?
- herp = derp`
- _, err := Load(strings.NewReader(src))
- t.Logf("%T: %v", err, err)
- if err == nil {
- t.Fatal("expected an error, got nil")
- }
- syntaxErr, ok := err.(ErrSyntax)
- if !ok {
- t.Fatal("expected an error of type ErrSyntax")
- }
- if syntaxErr.Line != 6 {
- t.Fatal("incorrect line number")
- }
- if syntaxErr.Source != "wut?" {
- t.Fatal("incorrect source")
- }
-}
-
-func TestDefinedSectionBehaviour(t *testing.T) {
- check := func(src string, expect File) {
- file, err := Load(strings.NewReader(src))
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(file, expect) {
- t.Errorf("expected %v, got %v", expect, file)
- }
- }
- // No sections for an empty file
- check("", File{})
- // Default section only if there are actually values for it
- check("foo=bar", File{"": {"foo": "bar"}})
- // User-defined sections should always be present, even if empty
- check("[a]\n[b]\nfoo=bar", File{
- "a": {},
- "b": {"foo": "bar"},
- })
- check("foo=bar\n[a]\nthis=that", File{
- "": {"foo": "bar"},
- "a": {"this": "that"},
- })
-}
diff --git a/vendor/github.com/vaughan0/go-ini/test.ini b/vendor/github.com/vaughan0/go-ini/test.ini
deleted file mode 100644
index d13c999e2..000000000
--- a/vendor/github.com/vaughan0/go-ini/test.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[default]
-stuff = things