author     Christopher Speller <crspeller@gmail.com>  2017-02-02 09:32:00 -0500
committer  Harrison Healey <harrisonmhealey@gmail.com>  2017-02-02 09:32:00 -0500
commit     701d1ab638b23c24877fc41824add66232446676 (patch)
tree       ec120c88d38ac9d38d9eabdd3270b52bb6ac9d96 /vendor/github.com/minio
parent     ca3211bc04f6dea34e8168217182637d1419f998 (diff)
download   chat-701d1ab638b23c24877fc41824add66232446676.tar.gz
           chat-701d1ab638b23c24877fc41824add66232446676.tar.bz2
           chat-701d1ab638b23c24877fc41824add66232446676.zip
Updating server dependencies (#5249)
Diffstat (limited to 'vendor/github.com/minio')
-rw-r--r--  vendor/github.com/minio/minio-go/.travis.yml | 6
-rw-r--r--  vendor/github.com/minio/minio-go/README.md | 28
-rw-r--r--  vendor/github.com/minio/minio-go/api-datatypes.go | 9
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-policy.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/api-list.go | 1
-rw-r--r--  vendor/github.com/minio/minio-go/api-notification.go | 29
-rw-r--r--  vendor/github.com/minio/minio-go/api-presigned.go | 11
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket.go | 13
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket_test.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-common.go | 72
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-copy.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-file.go | 121
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go | 54
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-progress.go | 25
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-readat.go | 135
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object.go | 46
-rw-r--r--  vendor/github.com/minio/minio-go/api-remove.go | 11
-rw-r--r--  vendor/github.com/minio/minio-go/api-s3-datatypes.go | 9
-rw-r--r--  vendor/github.com/minio/minio-go/api-stat.go | 74
-rw-r--r--  vendor/github.com/minio/minio-go/api.go | 113
-rw-r--r--  vendor/github.com/minio/minio-go/api_functional_v2_test.go | 44
-rw-r--r--  vendor/github.com/minio/minio-go/api_functional_v4_test.go | 210
-rw-r--r--  vendor/github.com/minio/minio-go/api_unit_test.go | 92
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache.go | 19
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache_test.go | 12
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-notification.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/constants.go | 6
-rw-r--r--  vendor/github.com/minio/minio-go/docs/API.md | 347
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go | 9
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go (renamed from vendor/github.com/minio/minio-go/request-signature-v2.go) | 28
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go (renamed from vendor/github.com/minio/minio-go/request-signature-v2_test.go) | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go (renamed from vendor/github.com/minio/minio-go/request-signature-v4.go) | 26
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go | 70
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/utils.go | 39
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go | 66
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3utils/utils.go | 195
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go | 284
-rw-r--r--  vendor/github.com/minio/minio-go/post-policy.go | 18
-rw-r--r--  vendor/github.com/minio/minio-go/retry-continous.go | 52
-rw-r--r--  vendor/github.com/minio/minio-go/s3-endpoints.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/test-utils_test.go | 10
-rw-r--r--  vendor/github.com/minio/minio-go/utils.go | 226
-rw-r--r--  vendor/github.com/minio/minio-go/utils_test.go | 280
44 files changed, 1712 insertions, 1118 deletions
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
index f61da45b6..0c353ba76 100644
--- a/vendor/github.com/minio/minio-go/.travis.yml
+++ b/vendor/github.com/minio/minio-go/.travis.yml
@@ -3,7 +3,6 @@ language: go
os:
- linux
-- osx
env:
- ARCH=x86_64
@@ -12,12 +11,9 @@ env:
go:
- 1.5.3
- 1.6
+- 1.7.4
script:
- diff -au <(gofmt -d .) <(printf "")
- go vet ./...
- go test -short -race -v ./...
-
-notifications:
- slack:
- secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8=
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index 16ed88685..f0d880b1e 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -1,5 +1,5 @@
-# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
**Supported cloud storage providers:**
@@ -14,22 +14,21 @@ The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compati
- Ceph Object Gateway
- Riak CS
-This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough of a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
-This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
## Download from Github
```sh
-$ go get -u github.com/minio/minio-go
+go get -u github.com/minio/minio-go
```
## Initialize Minio Client
-You need four items to connect to Minio object storage server.
-
+Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
| Parameter | Description|
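[Editor's note: the parameter table body is elided by the hunk above. As a rough, self-contained sketch (not the vendored README's own example) of connecting with those four parameters, using the documented `minio.New` constructor and Minio's public play-server demo credentials:]

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// The four parameters the README's table describes:
	// endpoint, access key, secret key, and whether to use TLS.
	endpoint := "play.minio.io:9000"
	accessKeyID := "Q3AM3UQ867SPQQA43P2F"                         // public demo credential
	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" // public demo credential
	useSSL := true

	client, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("minio client ready:", client != nil)
}
```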
@@ -68,7 +67,7 @@ func main() {
## Quick Start Example - File Uploader
-This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
+This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
@@ -132,11 +131,11 @@ func main() {
```sh
-$ go run file-uploader.go
+go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
-$ mc ls play/mymusic/
+mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
@@ -191,6 +190,13 @@ The full API Reference is available here.
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+
## Full Examples
#### Full Examples : Bucket Operations
@@ -238,7 +244,7 @@ The full API Reference is available here.
## Explore Further
* [Complete Documentation](https://docs.minio.io)
-* [Minio Golang Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
## Contribute
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
index 0871b1cfb..ab2aa4af2 100644
--- a/vendor/github.com/minio/minio-go/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-datatypes.go
@@ -16,7 +16,10 @@
package minio
-import "time"
+import (
+ "net/http"
+ "time"
+)
// BucketInfo container for bucket metadata.
type BucketInfo struct {
@@ -38,6 +41,10 @@ type ObjectInfo struct {
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata"`
+
// Owner name.
Owner struct {
DisplayName string `json:"name"`
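[Editor's note: the new `Metadata` field exposes response headers such as `x-amz-meta-*` on the object info. A hedged sketch of how a caller might read it, assuming `client` is an initialized `*minio.Client` and the bucket, object, and metadata key names are hypothetical:]

```go
objInfo, err := client.StatObject("mybucket", "myobject")
if err != nil {
	log.Fatalln(err)
}
// Standard headers and x-amz-meta-* user metadata are both
// surfaced through the http.Header map added above.
fmt.Println(objInfo.ContentType)
fmt.Println(objInfo.Metadata.Get("x-amz-meta-reviewer")) // hypothetical key
```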
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index 1f0dabb05..c9b4dcedd 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -275,7 +275,7 @@ func (o *Object) setOffset(bytesRead int64) error {
return nil
}
-// Read reads up to len(p) bytes into p. It returns the number of
+// Read reads up to len(b) bytes into b. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
@@ -343,7 +343,7 @@ func (o *Object) Read(b []byte) (n int, err error) {
return response.Size, err
}
-// Stat returns the ObjectInfo structure describing object.
+// Stat returns the ObjectInfo structure describing Object.
func (o *Object) Stat() (ObjectInfo, error) {
if o == nil {
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
index 07b1fa483..da0a409cd 100644
--- a/vendor/github.com/minio/minio-go/api-get-policy.go
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -41,7 +41,7 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
}
-// GetBucketPolicy - get bucket policy rules at a given path.
+// ListBucketPolicies - list all policies for a given prefix and all its children.
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
@@ -57,7 +57,7 @@ func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolic
return policy.GetPolicies(policyInfo.Statements, bucketName), nil
}
-// Request server for policy.
+// Request server for current bucket policy.
func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
// Get resources properly escaped and lined up before
// using them in http request.
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
index 795de6183..adfaa0a7a 100644
--- a/vendor/github.com/minio/minio-go/api-list.go
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -482,6 +482,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
+ continue
}
}
select {
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
index 85e57805b..9c2a2ebd2 100644
--- a/vendor/github.com/minio/minio-go/api-notification.go
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -22,6 +22,9 @@ import (
"io"
"net/http"
"net/url"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketNotification - get bucket notification at a given path.
@@ -135,7 +138,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
}
// Check ARN partition to verify if listening bucket is supported
- if isAmazonEndpoint(c.endpointURL) || isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
notificationInfoCh <- NotificationInfo{
Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
}
@@ -143,7 +146,14 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
}
// Continuously run and listen on bucket notification.
- for {
+ // Create a done channel to control the retry go routine.
+ retryDoneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(retryDoneCh)
+
+ // Wait on the jitter retry loop.
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
urlValues := make(url.Values)
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
@@ -155,10 +165,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
queryValues: urlValues,
})
if err != nil {
- notificationInfoCh <- NotificationInfo{
- Err: err,
- }
- return
+ continue
}
// Validate http response, upon error return quickly.
@@ -180,10 +187,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
for bio.Scan() {
var notificationInfo NotificationInfo
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
- notificationInfoCh <- NotificationInfo{
- Err: err,
- }
- return
+ continue
}
// Send notifications on channel only if there are events received.
if len(notificationInfo.Records) > 0 {
@@ -200,12 +204,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
// and re-connect.
if err == io.ErrUnexpectedEOF {
resp.Body.Close()
- continue
}
- notificationInfoCh <- NotificationInfo{
- Err: err,
- }
- return
}
}
}(notificationInfoCh)
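[Editor's note: `newRetryTimerContinous` comes from retry-continous.go, which this change adds but whose body is not shown in this diff. Sketched only from the call site above (the function name, `MaxJitter` handling, and internals here are assumptions, not the vendored implementation), a continuous jittered retry channel could look like this:]

```go
// Assumes: import "math/rand" and "time".
// retryTimerSketch emits attempt numbers forever, backing off
// exponentially from baseSleep up to maxSleep with full jitter,
// and stops when doneCh is closed.
func retryTimerSketch(baseSleep, maxSleep time.Duration, doneCh <-chan struct{}) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for attempt := 0; ; attempt++ {
			select {
			case attemptCh <- attempt:
			case <-doneCh:
				return
			}
			// Exponential backoff capped at maxSleep; guard the shift.
			sleep := maxSleep
			if attempt < 16 {
				if s := baseSleep * time.Duration(1<<uint(attempt)); s < maxSleep {
					sleep = s
				}
			}
			// Full jitter: sleep a random fraction of the backoff window.
			time.Sleep(time.Duration(rand.Int63n(int64(sleep) + 1)))
		}
	}()
	return attemptCh
}
```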
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index 200f33e9b..f9d05ab9b 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -20,6 +20,9 @@ import (
"errors"
"net/url"
"time"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// supportedGetReqParams - supported request parameters for GET presigned request.
@@ -126,14 +129,14 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
- if isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = c.accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
}
// Sign the policy.
- p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
+ p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
return u, p.formData, nil
}
@@ -156,7 +159,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
}
// Add a credential policy.
- credential := getCredential(c.accessKeyID, location, t)
+ credential := s3signer.GetCredential(c.accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
@@ -172,6 +175,6 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
- p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+ p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
return u, p.formData, nil
}
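[Editor's note: a hedged usage sketch of the presigned POST-policy flow shown above, assuming `client` is an initialized `*minio.Client`; bucket and key names are hypothetical:]

```go
// Build a POST policy and presign it; the client picks the V2 or V4
// signer shown above based on its configuration.
policy := minio.NewPostPolicy()
policy.SetBucket("mybucket")
policy.SetKey("uploads/photo.jpg")
policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))

url, formData, err := client.PresignedPostPolicy(policy)
if err != nil {
	log.Fatalln(err)
}
// formData carries either "signature" (V2) or "x-amz-signature" (V4),
// plus "GoogleAccessId" or "AWSAccessKeyId" depending on the endpoint.
fmt.Println(url, formData)
```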
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index 3c9f438ef..7c7e03f49 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -26,8 +26,10 @@ import (
"io/ioutil"
"net/http"
"net/url"
+ "path"
"github.com/minio/minio-go/pkg/policy"
+ "github.com/minio/minio-go/pkg/s3signer"
)
/// Bucket operations
@@ -89,11 +91,8 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// is the preferred method here. The final location of the
// 'bucket' is provided through XML LocationConstraint data with
// the request.
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
- targetURL.Path = "/" + bucketName + "/"
+ targetURL := c.endpointURL
+ targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
@@ -133,9 +132,9 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket_test.go b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
index a1899fbe2..ec33c8492 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket_test.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
@@ -24,8 +24,10 @@ import (
"io"
"io/ioutil"
"net/http"
- "net/url"
+ "path"
"testing"
+
+ "github.com/minio/minio-go/pkg/s3signer"
)
// Tests validate http request formulated for creation of bucket.
@@ -33,14 +35,11 @@ func TestMakeBucketRequest(t *testing.T) {
// Generates expected http request for bucket creation.
// Used for asserting with the actual request generated.
createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
-
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
- targetURL.Path = "/" + bucketName + "/"
+ targetURL := c.endpointURL
+ targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
+ var err error
req, err = http.NewRequest("PUT", targetURL.String(), nil)
if err != nil {
return nil, err
@@ -78,9 +77,9 @@ func TestMakeBucketRequest(t *testing.T) {
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
index 2eaef2e30..5f5f568e6 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -44,18 +44,17 @@ func isReadAt(reader io.Reader) (ok bool) {
}
// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
+func shouldUploadPart(objPart objectPart, uploadReq uploadPartReq) bool {
// If part not found should upload the part.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
+ if uploadReq.Part == nil {
return true
}
// if size mismatches should upload the part.
- if objPart.Size != uploadedPart.Size {
+ if objPart.Size != uploadReq.Part.Size {
return true
}
// if md5sum mismatches should upload the part.
- if objPart.ETag != uploadedPart.ETag {
+ if objPart.ETag != uploadReq.Part.ETag {
return true
}
return false
@@ -68,7 +67,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
-// minPartSize - 5MiB
+// minPartSize - 64MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
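[Editor's note: the body of optimalPartInfo is not shown in this hunk. A minimal sketch of the arithmetic implied by the constants documented above — this mirrors the documented limits, not necessarily the vendored code:]

```go
// Assumes: import "math". Constants copied from the comment above.
const (
	maxPartsCount = 10000
	minPartSize   = 64 * 1024 * 1024 // 64MiB
)

// sketchPartSize picks the smallest part size that fits objectSize into
// at most maxPartsCount parts, rounded up to a multiple of minPartSize.
func sketchPartSize(objectSize int64) int64 {
	partSize := int64(math.Ceil(float64(objectSize) / maxPartsCount))
	return int64(math.Ceil(float64(partSize)/minPartSize)) * minPartSize
}

// e.g. a 1TiB object: ceil(2^40/10000) ≈ 105MiB, rounded up to 128MiB
// parts, giving 8192 parts — safely under the 10000-part limit.
```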
@@ -167,37 +166,64 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
-func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
- return "", false, err
+ return "", err
}
if err := isValidObjectName(objectName); err != nil {
- return "", false, err
+ return "", err
}
- // Set content Type to default if empty string.
- if contentType == "" {
- contentType = "application/octet-stream"
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
+ if err != nil {
+ return "", err
}
+ return initMultipartUploadResult.UploadID, nil
+}
+
+// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
+// or initiates a new multipart session if no current one is found
+func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]objectPart, error) {
+ // A map of all uploaded parts.
+ var partsInfo map[int]objectPart
+ var err error
- // Find upload id for previous upload for an object.
- uploadID, err = c.findUploadID(bucketName, objectName)
+ uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
- return "", false, err
+ return "", nil, err
}
+
if uploadID == "" {
- // Initiate multipart upload for an object.
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+ // Initiates a new multipart request
+ uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ // Fetch previously upload parts and maximum part size.
+ partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
- return "", false, err
+ // When the server returns NoSuchUpload even though it previously acknowledged the existence of the upload id,
+ // initiate a new multipart upload
+ if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
+ uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ return "", nil, err
+ }
}
- // Save the new upload id.
- uploadID = initMultipartUploadResult.UploadID
- // Indicate that this is a new upload id.
- isNew = true
}
- return uploadID, isNew, nil
+
+ // Allocate partsInfo if not done yet
+ if partsInfo == nil {
+ partsInfo = make(map[int]objectPart)
+ }
+
+ return uploadID, partsInfo, nil
}
// computeHash - Calculates hashes for an input read Seeker.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index c7cd46d08..56978d427 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -16,7 +16,11 @@
package minio
-import "net/http"
+import (
+ "net/http"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
// CopyObject - copy a source object into a new object with the provided name in the provided bucket
func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
@@ -38,7 +42,7 @@ func (c Client) CopyObject(bucketName string, objectName string, objectSource st
}
// Set copy source.
- customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
+ customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
index deaed0acd..aa554b321 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -28,6 +28,8 @@ import (
"os"
"path/filepath"
"sort"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// FPutObject - Create an object in a bucket, with contents from file at filePath.
@@ -62,6 +64,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
}
+ objMetadata := make(map[string][]string)
+
// Set contentType based on filepath extension if not given or default
// value of "binary/octet-stream" if the extension has no associated type.
if contentType == "" {
@@ -70,9 +74,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
+ objMetadata["Content-Type"] = []string{contentType}
+
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
- if isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -82,11 +88,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -97,15 +103,15 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
// Do not compute MD5 for anonymous requests to Amazon
// S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Small object upload is initiated for uploads for input data size smaller than 5MiB.
if fileSize < minPartSize && fileSize >= 0 {
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Upload all large objects as multipart.
- n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@@ -116,7 +122,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
return n, err
}
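[Editor's note: the practical effect of the resume logic above is that re-running the same FPutObject call after an interruption finds the old upload id, lists its parts, and re-uploads only those whose size or ETag doesn't match. A hedged sketch, assuming `client` is an initialized `*minio.Client` and the names are hypothetical:]

```go
n, err := client.FPutObject("mybucket", "backups/db.tar.gz",
	"/var/backups/db.tar.gz", "application/gzip")
if err != nil {
	log.Fatalln(err)
}
// Rerunning this after a crash resumes from already-uploaded parts.
log.Printf("uploaded %d bytes", n)
```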
@@ -131,7 +137,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// against MD5SUM of each individual parts. This function also
// effectively utilizes file system capabilities of reading from
// specific sections and not having to create temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
+func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -140,9 +146,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return 0, err
}
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
@@ -153,19 +158,6 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // If this session is a continuation of a previous session fetch all
- // previously uploaded parts info.
- if !isNew {
- // Fetch previously upload parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
if err != nil {
@@ -178,14 +170,19 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
// Create a channel to communicate which part to upload.
// Buffer this to 10000, the maximum number of parts allowed by S3.
- uploadPartsCh := make(chan int, 10000)
+ uploadPartsCh := make(chan uploadPartReq, 10000)
// Just for readability.
lastPartNumber := totalPartsCount
// Send each part through the partUploadCh to be uploaded.
for p := 1; p <= totalPartsCount; p++ {
- uploadPartsCh <- p
+ part, ok := partsInfo[p]
+ if ok {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
+ } else {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
}
close(uploadPartsCh)
@@ -193,7 +190,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
for w := 1; w <= 3; w++ {
go func() {
// Deal with each part as it comes through the channel.
- for partNumber := range uploadPartsCh {
+ for uploadReq := range uploadPartsCh {
// Add hash algorithms that need to be calculated by computeHash()
// In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
@@ -203,47 +200,50 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
hashAlgos["sha256"] = sha256.New()
}
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = (fileSize - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
+ var prtSize int64
+ var err error
+
+ prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
// Create the part to be uploaded.
verifyObjPart := objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
- PartNumber: partNumber,
+ PartNumber: uploadReq.PartNum,
Size: partSize,
}
+
// If this is the last part do not give it the full part size.
- if partNumber == lastPartNumber {
+ if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Verify if part should be uploaded.
- if shouldUploadPart(verifyObjPart, partsInfo) {
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (fileSize - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
- var prtSize int64
- prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
- if err != nil {
- uploadedPartsCh <- uploadedPartRes{
- Error: err,
- }
- // Exit the goroutine.
- return
- }
-
+ if shouldUploadPart(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
@@ -252,12 +252,13 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return
}
// Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
+ uploadReq.Part = &objPart
}
// Return through the channel the part size.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
- PartNum: partNumber,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
Error: nil,
}
}
@@ -271,8 +272,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
- part, ok := partsInfo[uploadRes.PartNum]
- if !ok {
+ part := uploadRes.Part
+ if part == nil {
return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the total uploaded size.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index cdd3f53c2..f74eae626 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -45,11 +45,11 @@ import (
// If we exhaust all the known types, code proceeds to use stream as
// is where each part is re-downloaded, checksummed and verified
// before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
- return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
+ return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
@@ -58,17 +58,17 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
- return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
+ return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
}
-// putObjectStream uploads files bigger than 5MiB, and also supports
+// putObjectStream uploads files bigger than 64MiB, and also supports
// special case where size is unknown i.e '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -83,27 +83,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all previously uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // getUploadID for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
- // If This session is a continuation of a previous session fetch all
- // previously uploaded parts info and as a special case only fetch partsInfo
- // for only known upload size.
- if !isNew {
- // Fetch previously uploaded parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
@@ -139,12 +124,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// as we read from the source.
reader = newHook(tmpBuffer, progress)
+ part, ok := partsInfo[partNumber]
+
// Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
+ if !ok || shouldUploadPart(objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
- }, partsInfo) {
+ }, uploadPartReq{PartNum: partNumber, Part: &part}) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
@@ -212,7 +199,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@@ -225,13 +212,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
urlValues := make(url.Values)
urlValues.Set("uploads", "")
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
// Set ContentType header.
customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+ // Set a default content-type header if the latter is not provided
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
reqMetadata := requestMetadata{
bucketName: bucketName,
diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go
index 0f79e708f..42f8ce4d1 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-progress.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go
@@ -19,10 +19,19 @@ package minio
import (
"io"
"strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
-// PutObjectWithProgress - With progress.
+// PutObjectWithProgress - with progress.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
+ metaData := make(map[string][]string)
+ metaData["Content-Type"] = []string{contentType}
+ return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
+}
+
+// PutObjectWithMetadata - with metadata.
+func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -50,7 +59,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
- if isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -63,11 +72,11 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -81,15 +90,15 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
}
// Do not compute MD5 for anonymous requests to Amazon
// S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// putSmall object.
if size < minPartSize && size >= 0 {
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
// For all sizes greater than 5MiB do multipart.
- n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
+ n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@@ -100,7 +109,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
return n, err
}
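[Editor's note: a hedged usage sketch of the PutObjectWithMetadata API added above — Content-Type travels in the same map as user metadata, and a default is applied when the key is absent. `client` is assumed initialized; bucket, object, and metadata key names are hypothetical:]

```go
body := bytes.NewReader([]byte(`{"ok":true}`))
metadata := map[string][]string{
	"Content-Type":      {"application/json"},
	"x-amz-meta-origin": {"nightly-export"}, // hypothetical user metadata key
}
n, err := client.PutObjectWithMetadata("mybucket", "exports/run.json", body, metadata, nil)
if err != nil {
	log.Fatalln(err)
}
log.Printf("wrote %d bytes", n)
```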
diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go
index 14fa4b296..4ab1095f6 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-readat.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go
@@ -32,17 +32,22 @@ type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
+ Part *objectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part being uploaded.
+ Part *objectPart // Existing part info, nil if the part was not uploaded before.
}
// shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
+func shouldUploadPartReadAt(objPart objectPart, uploadReq uploadPartReq) bool {
// If part not found part should be uploaded.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
+ if uploadReq.Part == nil {
return true
}
// if size mismatches part should be uploaded.
- if uploadedPart.Size != objPart.Size {
+ if uploadReq.Part.Size != objPart.Size {
return true
}
return false
@@ -58,7 +63,7 @@ func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart)
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -67,9 +72,8 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return 0, err
}
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
@@ -80,17 +84,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // Fetch all parts info previously uploaded.
- if !isNew {
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
@@ -103,7 +96,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
- uploadPartsCh := make(chan int, 10000)
+ uploadPartsCh := make(chan uploadPartReq, 10000)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
@@ -112,7 +105,12 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
- uploadPartsCh <- p
+ part, ok := partsInfo[p]
+ if ok {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
+ } else {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
}
close(uploadPartsCh)
@@ -123,64 +121,65 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
readAtBuffer := make([]byte, optimalReadBufferSize)
// Each worker will draw from the part channel and upload in parallel.
- for partNumber := range uploadPartsCh {
+ for uploadReq := range uploadPartsCh {
// Declare a new tmpBuffer.
tmpBuffer := new(bytes.Buffer)
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = (size - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
+
+ // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
+ // Sha256 is avoided in non-v4 signature requests or HTTPS connections
+ hashSums := make(map[string][]byte)
+ hashAlgos := make(map[string]hash.Hash)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ var prtSize int64
+ var err error
+ prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
+ if err != nil {
+ // Send the error back through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
// Verify object if its uploaded.
verifyObjPart := objectPart{
- PartNumber: partNumber,
+ PartNumber: uploadReq.PartNum,
Size: partSize,
}
// Special case if we see a last part number, save last part
// size as the proper part size.
- if partNumber == lastPartNumber {
+ if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Only upload the necessary parts. Otherwise return size through channel
// to update any progress bar.
- if shouldUploadPartReadAt(verifyObjPart, partsInfo) {
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (size - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
-
- // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
- // Sha256 is avoided in non-v4 signature requests or HTTPS connections
- hashSums := make(map[string][]byte)
- hashAlgos := make(map[string]hash.Hash)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
-
- var prtSize int64
- prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
- if err != nil {
- // Send the error back through the channel.
- uploadedPartsCh <- uploadedPartRes{
- Size: 0,
- Error: err,
- }
- // Exit the goroutine.
- return
- }
-
+ if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
@@ -190,12 +189,13 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return
}
// Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
+ uploadReq.Part = &objPart
}
// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
- PartNum: partNumber,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
Error: nil,
}
}
@@ -210,8 +210,9 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
- part, ok := partsInfo[uploadRes.PartNum]
- if !ok {
+ // part, ok := partsInfo[uploadRes.PartNum]
+ part := uploadRes.Part
+ if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index f7dd2daf1..a779fbebe 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -103,11 +103,10 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
// implement Seekable calls. Ignore them and treat
// them like a stream with unknown length.
switch st.Name() {
- case "stdin":
- fallthrough
- case "stdout":
- fallthrough
- case "stderr":
+ case "stdin", "stdout", "stderr":
+ return
+ // Ignore the read/write streams of os.Pipe(), which also have unknown length.
+ case "|0", "|1":
return
}
size = st.Size()
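[Editor's note: the effect of the pipe handling above is that data of unknown length is routed through the streaming multipart path. A hedged sketch, assuming `client` is an initialized `*minio.Client`; names are hypothetical:]

```go
// os.Stdin and os.Pipe ends are detected by name above and treated as
// unknown-length streams, so PutObject stages and uploads in parts.
n, err := client.PutObject("mybucket", "from-stdin.log", os.Stdin, "application/octet-stream")
if err != nil {
	log.Fatalln(err)
}
log.Printf("streamed %d bytes", n)
```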
@@ -151,7 +150,7 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte
// putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -169,7 +168,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
+ st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
if err != nil {
return 0, err
}
@@ -181,7 +180,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// putObjectSingle is a special function for uploading single put object request.
// This special function is used as a fallback when multipart upload fails.
-func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -237,7 +236,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
}
}
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, contentType)
+ st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
return 0, err
}
@@ -255,7 +254,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -272,13 +271,20 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
- if strings.TrimSpace(contentType) == "" {
- contentType = "application/octet-stream"
- }
-
// Set headers.
customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
+
+ // Set metadata to headers
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+ // If Content-Type is not provided, default to application/octet-stream.
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
// Populate request metadata.
reqMetadata := requestMetadata{
@@ -303,13 +309,13 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
}
}
- var metadata ObjectInfo
+ var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
- metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
+ objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
- metadata.Size = size
+ objInfo.Size = size
// Return here.
- return metadata, nil
+ return objInfo, nil
}
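For reference, the header-construction logic this hunk introduces can be exercised in isolation. The sketch below uses a hypothetical `buildHeaders` helper (not part of minio-go) that reproduces the metadata loop and the `application/octet-stream` fallback from `putObjectDo`:

```go
package main

import (
	"fmt"
	"net/http"
)

// buildHeaders mirrors putObjectDo above: each metadata key becomes a
// request header (first value only), and Content-Type falls back to
// application/octet-stream when the caller does not provide one.
func buildHeaders(metaData map[string][]string) http.Header {
	h := make(http.Header)
	for k, v := range metaData {
		if len(v) > 0 {
			h.Set(k, v[0])
		}
	}
	if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
		h.Set("Content-Type", "application/octet-stream")
	}
	return h
}

func main() {
	h := buildHeaders(map[string][]string{"X-Amz-Meta-Owner": {"alice"}})
	fmt.Println(h.Get("Content-Type"))     // application/octet-stream
	fmt.Println(h.Get("X-Amz-Meta-Owner")) // alice
}
```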
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index 110a73e99..2ca84458e 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -71,6 +71,13 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
if err != nil {
return err
}
+ if resp != nil {
+ // If an unexpected error occurred and the max retry count was reached, let the client know.
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
// DeleteObject always responds with http '204' even for
// objects which do not exist. So no need to handle them
// specifically.
@@ -164,6 +171,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
break
}
}
+ if count == 0 {
+ // The Multi-Object Delete API doesn't accept an empty object list, so quit immediately.
+ break
+ }
if count < maxEntries {
// We didn't have 1000 entries, so this is the last batch
finish = true
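A minimal caller-side sketch of the batched delete path touched here, assuming the documented `RemoveObjects` API and a placeholder endpoint with placeholder credentials:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials, for illustration only.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		fmt.Println(err)
		return
	}

	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"photos/1.jpg", "photos/2.jpg"} {
			objectsCh <- name
		}
		// With the fix above, closing the channel without sending any names
		// makes RemoveObjects return without issuing an empty
		// Multi-Object Delete request.
	}()

	// Deletions are batched, up to 1000 objects per request; failures
	// arrive on the returned error channel.
	for e := range client.RemoveObjects("mybucket", objectsCh) {
		fmt.Println("failed to remove", e.ObjectName+":", e.Err)
	}
}
```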
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
index 52e8a120d..a34f82e97 100644
--- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -210,15 +210,16 @@ type createBucketConfiguration struct {
// deleteObject container for Delete element in MultiObjects Delete XML request
type deleteObject struct {
Key string
- VersionId string `xml:"VersionId,omitempty"`
+ VersionID string `xml:"VersionId,omitempty"`
}
// deletedObject container for Deleted element in MultiObjects Delete XML response
type deletedObject struct {
- Key string
- VersionId string `xml:"VersionId,omitempty"`
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+ // These fields are ignored.
DeleteMarker bool
- DeleteMarkerVersionId string
+ DeleteMarkerVersionID string
}
// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
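The `Id`-to-`ID` renames here only change the Go field names; the XML wire format stays pinned by the struct tags. A standalone sketch mirroring the `deleteObject` type above shows `omitempty` dropping the version element for unversioned deletes:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Mirrors deleteObject above: the Go field is the idiomatic VersionID,
// while the wire name remains VersionId via the struct tag.
type deleteObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

func main() {
	out, _ := xml.Marshal(deleteObject{Key: "photo.jpg"})
	// omitempty drops the empty VersionId element entirely:
	fmt.Println(string(out)) // <deleteObject><Key>photo.jpg</Key></deleteObject>
}
```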
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index 976d61241..e3bb115d4 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -21,6 +21,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// BucketExists verifies whether a bucket exists and you have permission to access it.
@@ -49,6 +51,31 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
return true, nil
}
+// List of header keys to be filtered out
+// of all S3 API HTTP responses.
+var defaultFilterKeys = []string{
+ "Transfer-Encoding",
+ "Accept-Ranges",
+ "Date",
+ "Server",
+ "Vary",
+ "x-amz-request-id",
+ "x-amz-id-2",
+ // Add new headers to be ignored.
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+ filterKeys := append([]string{
+ "ETag",
+ "Content-Length",
+ "Last-Modified",
+ "Content-Type",
+ }, defaultFilterKeys...)
+ return filterHeader(header, filterKeys)
+}
+
// StatObject verifies whether the object exists and you have permission to access it.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
@@ -78,17 +105,21 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
- // Parse content length.
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Content-Length is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
+ // Content-Length is not valid for Google Cloud Storage, do not verify.
+ var size int64 = -1
+ if !s3utils.IsGoogleEndpoint(c.endpointURL) {
+ // Parse content length.
+ size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
}
}
// Parse Last-Modified, which has the HTTP time format.
@@ -109,12 +140,19 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if contentType == "" {
contentType = "application/octet-stream"
}
+
+ // Extract only the relevant header keys describing the object.
+ // The following function filters out the standard set of headers
+ // that are not part of object metadata.
+ metadata := extractObjMetadata(resp.Header)
+
// Save object metadata info.
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = size
- objectStat.LastModified = date
- objectStat.ContentType = contentType
- return objectStat, nil
+ return ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: size,
+ LastModified: date,
+ ContentType: contentType,
+ Metadata: metadata,
+ }, nil
}
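`filterHeader` itself is not part of this diff; the following is a plausible standalone equivalent, shown only to illustrate what `extractObjMetadata` is assumed to do (copy the response headers, then drop the listed keys):

```go
package main

import (
	"fmt"
	"net/http"
)

// filterOut is a hypothetical stand-in for the unexported filterHeader
// helper used by extractObjMetadata above: it returns a copy of header
// with the listed keys removed, leaving only object metadata behind.
func filterOut(header http.Header, filterKeys []string) http.Header {
	filtered := make(http.Header)
	for k, v := range header {
		filtered[k] = v
	}
	for _, key := range filterKeys {
		filtered.Del(key)
	}
	return filtered
}

func main() {
	h := http.Header{}
	h.Set("ETag", `"abc123"`)
	h.Set("X-Amz-Meta-Owner", "alice")
	h.Set("Server", "MinIO")

	meta := filterOut(h, []string{"ETag", "Server"})
	fmt.Println(meta) // map[X-Amz-Meta-Owner:[alice]]
}
```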
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index 954927084..98829cd2c 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -33,12 +33,18 @@ import (
"strings"
"sync"
"time"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
+ // Parsed endpoint url provided by the user.
+ endpointURL url.URL
+
// AccessKeyID required for authorized requests.
accessKeyID string
// SecretAccessKey required for authorized requests.
@@ -53,7 +59,6 @@ type Client struct {
appName string
appVersion string
}
- endpointURL string
// Indicate whether we are using https or not
secure bool
@@ -73,7 +78,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "2.0.2"
+ libraryVersion = "2.0.3"
)
// User Agent should always follow the below style.
@@ -116,13 +121,12 @@ func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Cl
if err != nil {
return nil, err
}
- // Google cloud storage should be set to signature V2, force it if
- // not.
- if isGoogleEndpoint(clnt.endpointURL) {
+ // Google cloud storage should be set to signature V2, force it if not.
+ if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
// If Amazon S3, set to signature v4.
- if isAmazonEndpoint(clnt.endpointURL) {
+ if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
return clnt, nil
@@ -151,6 +155,18 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) == 0 {
+ return nil
+ }
+ for key, val := range via[0].Header {
+ req.Header[key] = val
+ }
+ return nil
+}
+
func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
@@ -170,11 +186,12 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
clnt.secure = secure
// Save endpoint URL, user agent for future uses.
- clnt.endpointURL = endpointURL.String()
+ clnt.endpointURL = *endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
- Transport: http.DefaultTransport,
+ Transport: http.DefaultTransport,
+ CheckRedirect: redirectHeaders,
}
// Instantiate bucket location cache.
@@ -262,6 +279,12 @@ type requestMetadata struct {
contentMD5Bytes []byte
}
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
// For anonymous requests, no need to filter.
@@ -281,11 +304,9 @@ func (c Client) filterSignature(req *http.Request) {
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
- regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
- regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
@@ -364,20 +385,35 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
- // do the request.
- resp, err := c.httpClient.Do(req)
- if err != nil {
- // Handle this specifically for now until future Golang
- // versions fix this issue properly.
- urlErr, ok := err.(*url.Error)
- if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
- return nil, &url.Error{
- Op: urlErr.Op,
- URL: urlErr.URL,
- Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ var resp *http.Response
+ var err error
+ // Do the request in a loop in case an HTTP 307 is returned, since golang still
+ // doesn't handle this situation properly (https://github.com/golang/go/issues/7912)
+ for {
+ resp, err = c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang
+ // versions fix this issue properly.
+ urlErr, ok := err.(*url.Error)
+ if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ }
}
+ return nil, err
+ }
+ // Redo the request with the new redirect URL if HTTP 307 is returned; otherwise quit the loop.
+ if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
+ newURL, err := url.Parse(resp.Header.Get("Location"))
+ if err != nil {
+ break
+ }
+ req.URL = newURL
+ } else {
+ break
}
- return nil, err
}
// Response should never be nil here; report it if that is the case.
@@ -467,6 +503,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body)
+ // res.Body should be closed
+ closeResponse(res)
if err != nil {
return nil, err
}
@@ -512,7 +550,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// Default all requests to "us-east-1" or "cn-north-1" (china region)
location := "us-east-1"
- if isAmazonChinaEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
@@ -550,10 +588,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
if c.signature.isV2() {
// Presign URL with signature v2.
- req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
+ req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else {
// Presign URL with signature v4.
- req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+ req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
}
return req, nil
}
@@ -566,7 +604,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
// Skip setting 'expect' header for Google Cloud Storage, there
// are some known issues - https://github.com/restic/restic/issues/520
- if !isGoogleEndpoint(c.endpointURL) {
+ if !s3utils.IsGoogleEndpoint(c.endpointURL) {
// Set 'Expect' header for the request.
req.Header.Set("Expect", "100-continue")
}
@@ -610,10 +648,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
if !c.anonymous {
if c.signature.isV2() {
// Add signature version '2' authorization header.
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
} else if c.signature.isV4() {
// Add signature version '4' authorization header.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
}
}
@@ -631,26 +669,21 @@ func (c Client) setUserAgent(req *http.Request) {
// makeTargetURL make a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
- // Save host.
- url, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
- host := url.Host
+ host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
- if isAmazonEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
// Save scheme.
- scheme := url.Scheme
+ scheme := c.endpointURL.Scheme
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
// Save if target url will have buckets which support virtual host.
- isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
+ isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
@@ -658,19 +691,19 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
if isVirtualHostStyle {
urlStr = scheme + "://" + bucketName + "." + host + "/"
if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
+ urlStr = urlStr + s3utils.EncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName + "/"
if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
+ urlStr = urlStr + s3utils.EncodePath(objectName)
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
- urlStr = urlStr + "?" + queryEncode(queryValues)
+ urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
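Two Go redirect quirks motivate the `api.go` changes above: `CheckRedirect` hooks only fire for redirects the client actually follows, and pre-1.8 clients drop custom headers (such as Authorization) when following them, while HTTP 307 is not followed at all, hence the manual loop in `do`. A self-contained sketch of the header-copying hook wired into a plain `http.Client`:

```go
package main

import (
	"fmt"
	"net/http"
)

// copyRedirectHeaders mirrors the redirectHeaders hook above: when the
// client follows a redirect, re-apply the original request's headers,
// which pre-1.8 Go would otherwise drop.
func copyRedirectHeaders(req *http.Request, via []*http.Request) error {
	if len(via) == 0 {
		return nil
	}
	for key, val := range via[0].Header {
		req.Header[key] = val
	}
	return nil
}

func main() {
	client := &http.Client{CheckRedirect: copyRedirectHeaders}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.Header.Set("X-Custom", "kept-across-redirects")
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```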
diff --git a/vendor/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
index 2084ffef7..23713732a 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v2_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
@@ -42,10 +42,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -88,10 +88,10 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -168,10 +168,10 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -238,10 +238,10 @@ func TestResumablePutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -350,10 +350,10 @@ func TestFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -499,10 +499,10 @@ func TestResumableFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -577,10 +577,10 @@ func TestMakeBucketRegionsV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -628,10 +628,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -761,10 +761,10 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -897,10 +897,10 @@ func TestCopyObjectV2(t *testing.T) {
// Instantiate new minio client object
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1014,10 +1014,10 @@ func TestFunctionalV2(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
diff --git a/vendor/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
index 64f8a77f8..d19d3e1ae 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v4_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
@@ -70,10 +70,10 @@ func TestMakeBucketError(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -116,10 +116,10 @@ func TestMakeBucketRegions(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -167,10 +167,10 @@ func TestPutObjectReadAt(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -204,7 +204,10 @@ func TestPutObjectReadAt(t *testing.T) {
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ // Object content type
+ objectContentType := "binary/octet-stream"
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), objectContentType)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -227,6 +230,105 @@ func TestPutObjectReadAt(t *testing.T) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
+ if st.ContentType != objectContentType {
+ t.Fatalf("Error: Content types don't match, expected: %+v, found: %+v\n", objectContentType, st.ContentType)
+ }
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Test PutObject with custom metadata, using large data to trigger a multipart upload.
+func TestPutObjectWithMetadata(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ os.Getenv("S3_ADDRESS"),
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ mustParseBool(os.Getenv("S3_SECURE")),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data using 2 parts
+ buf := make([]byte, minPartSize*2)
+ // Use crand.Reader for multipart tests to ensure part order at the end.
+ size, err := io.ReadFull(crand.Reader, buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*2 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, size)
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+
+ n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{"Content-Type": []string{customContentType}}, nil)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+ if st.ContentType != customContentType {
+ t.Fatalf("Error: Expected and found content types do not match, want %v, got %v\n",
+ customContentType, st.ContentType)
+ }
if err := r.Close(); err != nil {
t.Fatal("Error:", err)
}
@@ -255,10 +357,10 @@ func TestListPartiallyUploaded(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -334,10 +436,10 @@ func TestGetOjectSeekEnd(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -429,10 +531,10 @@ func TestGetObjectClosedTwice(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -509,10 +611,10 @@ func TestRemoveMultipleObjects(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
@@ -583,10 +685,10 @@ func TestRemovePartiallyUploaded(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -656,10 +758,10 @@ func TestResumablePutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -702,9 +804,10 @@ func TestResumablePutObject(t *testing.T) {
// New object name.
objectName := bucketName + "-resumable"
+ objectContentType := "application/custom-octet-stream"
// Upload the file.
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
+ n, err = c.FPutObject(bucketName, objectName, file.Name(), objectContentType)
if err != nil {
t.Fatal("Error:", err)
}
@@ -718,17 +821,22 @@ func TestResumablePutObject(t *testing.T) {
t.Fatal("Error:", err)
}
- // Upload now cloud to cloud.
- n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream")
+ // Get object info.
+ objInfo, err := reader.Stat()
if err != nil {
t.Fatal("Error:", err)
}
- // Get object info.
- objInfo, err := reader.Stat()
+ if objInfo.ContentType != objectContentType {
+ t.Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
+ }
+
+ // Upload now cloud to cloud.
+ n, err = c.PutObject(bucketName, objectName+"-put", reader, objectContentType)
if err != nil {
t.Fatal("Error:", err)
}
+
if n != objInfo.Size {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
}
@@ -766,10 +874,10 @@ func TestResumableFPutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -856,10 +964,10 @@ func TestFPutObjectMultipart(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -912,9 +1020,10 @@ func TestFPutObjectMultipart(t *testing.T) {
// Set base object name
objectName := bucketName + "FPutObject"
+ objectContentType := "testapplication/octet-stream"
// Perform standard FPutObject with contentType provided (Expecting objectContentType)
- n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), objectContentType)
if err != nil {
t.Fatal("Error:", err)
}
@@ -922,6 +1031,21 @@ func TestFPutObjectMultipart(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
}
+ r, err := c.GetObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v\n", err)
+ }
+ objInfo, err := r.Stat()
+ if err != nil {
+ t.Fatalf("Unexpected error: %v\n", err)
+ }
+ if objInfo.Size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+ if objInfo.ContentType != objectContentType {
+ t.Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
+ }
+
// Remove all objects and bucket and temp file
err = c.RemoveObject(bucketName, objectName+"-standard")
if err != nil {
@@ -945,10 +1069,10 @@ func TestFPutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1105,10 +1229,10 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1238,10 +1362,10 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1387,10 +1511,10 @@ func TestPresignedPostPolicy(t *testing.T) {
// Instantiate new minio client object
c, err := NewV4(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1482,10 +1606,10 @@ func TestCopyObject(t *testing.T) {
// Instantiate new minio client object
c, err := NewV4(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1648,10 +1772,10 @@ func TestBucketNotification(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1724,10 +1848,10 @@ func TestFunctional(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go
index 817a8c2c7..c1db0df5d 100644
--- a/vendor/github.com/minio/minio-go/api_unit_test.go
+++ b/vendor/github.com/minio/minio-go/api_unit_test.go
@@ -18,11 +18,9 @@ package minio
import (
"bytes"
- "fmt"
"io"
"io/ioutil"
"net/http"
- "net/url"
"os"
"strings"
"testing"
@@ -202,49 +200,6 @@ func TestTempFile(t *testing.T) {
}
}
-// Tests url encoding.
-func TestEncodeURL2Path(t *testing.T) {
- type urlStrings struct {
- objName string
- encodedObjName string
- }
-
- bucketName := "bucketName"
- want := []urlStrings{
- {
- objName: "本語",
- encodedObjName: "%E6%9C%AC%E8%AA%9E",
- },
- {
- objName: "本語.1",
- encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
- },
- {
- objName: ">123>3123123",
- encodedObjName: "%3E123%3E3123123",
- },
- {
- objName: "test 1 2.txt",
- encodedObjName: "test%201%202.txt",
- },
- {
- objName: "test++ 1.txt",
- encodedObjName: "test%2B%2B%201.txt",
- },
- }
-
- for _, o := range want {
- u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
- if err != nil {
- t.Fatal("Error:", err)
- }
- urlPath := "/" + bucketName + "/" + o.encodedObjName
- if urlPath != encodeURL2Path(u) {
- t.Fatal("Error")
- }
- }
-}
-
// Tests error response structure.
func TestErrorResponse(t *testing.T) {
var err error
@@ -270,53 +225,6 @@ func TestErrorResponse(t *testing.T) {
}
}
-// Tests signature calculation.
-func TestSignatureCalculation(t *testing.T) {
- req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
- if err != nil {
- t.Fatal("Error:", err)
- }
- req = signV4(*req, "", "", "us-east-1")
- if req.Header.Get("Authorization") != "" {
- t.Fatal("Error: anonymous credentials should not have Authorization header.")
- }
-
- req = preSignV4(*req, "", "", "us-east-1", 0)
- if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
- t.Fatal("Error: anonymous credentials should not have Signature query resource.")
- }
-
- req = signV2(*req, "", "")
- if req.Header.Get("Authorization") != "" {
- t.Fatal("Error: anonymous credentials should not have Authorization header.")
- }
-
- req = preSignV2(*req, "", "", 0)
- if strings.Contains(req.URL.RawQuery, "Signature") {
- t.Fatal("Error: anonymous credentials should not have Signature query resource.")
- }
-
- req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
- if req.Header.Get("Authorization") == "" {
- t.Fatal("Error: normal credentials should have Authorization header.")
- }
-
- req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
- if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
- t.Fatal("Error: normal credentials should have Signature query resource.")
- }
-
- req = signV2(*req, "ACCESS-KEY", "SECRET-KEY")
- if req.Header.Get("Authorization") == "" {
- t.Fatal("Error: normal credentials should have Authorization header.")
- }
-
- req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
- if !strings.Contains(req.URL.RawQuery, "Signature") {
- t.Fatal("Error: normal credentials should not have Signature query resource.")
- }
-}
-
// Tests signature type.
func TestSignatureType(t *testing.T) {
clnt := Client{}
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 4ad106959..46dfe9348 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -23,6 +23,9 @@ import (
"path"
"strings"
"sync"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// bucketLocationCache - Provides simple mechanism to hold bucket
@@ -85,7 +88,7 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
return location, nil
}
- if isAmazonChinaEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
@@ -160,10 +163,14 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
urlValues.Set("location", "")
// Set get bucket location always as path style.
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
+ targetURL := c.endpointURL
+
+ // Requesting a bucket location from an accelerate endpoint returns a 400,
+ // so default to us-east-1 for the lookup
+ if s3utils.IsAmazonS3AccelerateEndpoint(c.endpointURL) {
+ targetURL.Host = getS3Endpoint("us-east-1")
}
+
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
@@ -189,9 +196,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Sign the request.
if c.signature.isV4() {
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
}
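Because `endpointURL` is now stored as a `url.URL` value rather than a string, `targetURL := c.endpointURL` copies the struct, so rewriting `Host` for the accelerate-endpoint lookup cannot mutate the client's endpoint. A standalone illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	endpoint := url.URL{Scheme: "https", Host: "mybucket.s3-accelerate.amazonaws.com"}

	// Value assignment copies the struct, as in getBucketLocationRequest above.
	target := endpoint
	// Stand-in for getS3Endpoint("us-east-1") in the diff above.
	target.Host = "s3.amazonaws.com"

	fmt.Println(endpoint.Host) // unchanged
	fmt.Println(target.Host)   // rewritten only for the location lookup
}
```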
diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go
index 81cfbc097..0c068c966 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache_test.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go
@@ -26,6 +26,8 @@ import (
"path"
"reflect"
"testing"
+
+ "github.com/minio/minio-go/pkg/s3signer"
)
// Test validates `newBucketLocationCache`.
@@ -70,14 +72,12 @@ func TestGetBucketLocationRequest(t *testing.T) {
urlValues.Set("location", "")
// Set get bucket location always as path style.
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
+ targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method.
+ var err error
req, err = http.NewRequest("GET", targetURL.String(), nil)
if err != nil {
return nil, err
@@ -93,9 +93,9 @@ func TestGetBucketLocationRequest(t *testing.T) {
// Sign the request.
if c.signature.isV4() {
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
index 121a63a77..4f60f1c8b 100644
--- a/vendor/github.com/minio/minio-go/bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -84,7 +84,7 @@ func (arn Arn) String() string {
// NotificationConfig - represents one single notification configuration
// such as topic, queue or lambda configuration.
type NotificationConfig struct {
- Id string `xml:"Id,omitempty"`
+ ID string `xml:"Id,omitempty"`
Arn Arn `xml:"-"`
Events []NotificationEventType `xml:"Event"`
Filter *Filter `xml:"Filter,omitempty"`
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index 779ed8c7a..057c3eef4 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -44,3 +44,9 @@ const optimalReadBufferSize = 1024 * 1024 * 5
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Signature related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+)
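These constants feed the V4 signer; the layout string follows Go's reference-time convention, and the algorithm name appears verbatim in the `Authorization` header the signer builds. A quick standalone check of the timestamp format:

```go
package main

import (
	"fmt"
	"time"
)

const (
	signV4Algorithm   = "AWS4-HMAC-SHA256"
	iso8601DateFormat = "20060102T150405Z"
)

func main() {
	// Renders e.g. 20170202T143000Z, the X-Amz-Date format used by V4 signing.
	fmt.Println(time.Now().UTC().Format(iso8601DateFormat))
	fmt.Println(signV4Algorithm)
}
```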
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
index 9977c5df9..0365c7fad 100644
--- a/vendor/github.com/minio/minio-go/docs/API.md
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -9,9 +9,9 @@
package main
import (
- "fmt"
+ "fmt"
- "github.com/minio/minio-go"
+ "github.com/minio/minio-go"
)
func main() {
@@ -22,7 +22,7 @@ func main() {
minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
if err != nil {
fmt.Println(err)
- return
+ return
}
}
@@ -35,9 +35,9 @@ func main() {
package main
import (
- "fmt"
+ "fmt"
- "github.com/minio/minio-go"
+ "github.com/minio/minio-go"
)
func main() {
@@ -48,18 +48,18 @@ func main() {
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ssl)
if err != nil {
fmt.Println(err)
- return
+ return
}
}
```
-| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations |
-|:---|:---|:---|:---|
-|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) |
-|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) |
-|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) |
-| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) |
+| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
+|:---|:---|:---|:---|:---|
+|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
+|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
+|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) |
|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
@@ -77,10 +77,10 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`endpoint` | _string_ |S3 object storage endpoint. |
-| `accessKeyID` |_string_ | Access key for the object storage endpoint. |
-| `secretAccessKey` | _string_ |Secret key for the object storage endpoint. |
-|`ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`endpoint` | _string_ |S3 compatible object storage endpoint |
+|`accessKeyID` |_string_ |Access key for the object storage |
+|`secretAccessKey` | _string_ |Secret key for the object storage |
+|`ssl` | _bool_ | If 'true', API requests will be secure (HTTPS); insecure (HTTP) otherwise |
## 2. Bucket operations
@@ -94,8 +94,8 @@ __Parameters__
| Param | Type | Description |
|---|---|---|
-|`bucketName` | _string_ | Name of the bucket. |
-| `location` | _string_ | Default value is us-east-1 Region where the bucket is created. Valid values are listed below:|
+|`bucketName` | _string_ | Name of the bucket |
+| `location` | _string_ | Region where the bucket is to be created. Default value is us-east-1. Other valid values are listed below. Note: When used with minio server, use the region specified in its config file (defaults to us-east-1).|
| | |us-east-1 |
| | |us-west-1 |
| | |us-west-2 |
@@ -104,7 +104,7 @@ __Parameters__
| | | ap-southeast-1|
| | | ap-northeast-1|
| | | ap-southeast-2|
-| | | sa-east-1|
+| | | sa-east-1|
__Example__
@@ -128,30 +128,30 @@ Lists all buckets.
| Param | Type | Description |
|---|---|---|
-|`bucketList` | _[]BucketInfo_ | Lists bucket in following format shown below: |
+|`bucketList` | _[]BucketInfo_ | Lists of all buckets |
| Param | Type | Description |
|---|---|---|
-|`bucket.Name` | _string_ | bucket name. |
-|`bucket.CreationDate` | _time.Time_ | date when bucket was created. |
+|`bucket.Name` | _string_ | Name of the bucket |
+|`bucket.CreationDate` | _time.Time_ | Date of bucket creation |
- __Example__
+__Example__
-
- ```go
- buckets, err := minioClient.ListBuckets()
-if err != nil {
+```go
+
+buckets, err := minioClient.ListBuckets()
+ if err != nil {
fmt.Println(err)
return
}
for _, bucket := range buckets {
- fmt.Println(bucket)
-}
+ fmt.Println(bucket)
+}
- ```
+```
<a name="BucketExists"></a>
### BucketExists(bucketName string) (found bool, err error)
@@ -163,15 +163,15 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`found` | _bool_ | indicates whether bucket exists or not |
-|`err` | _error_ | standard error |
+|`found` | _bool_ | Indicates whether bucket exists or not |
+|`err` | _error_ | Standard Error |
__Example__
@@ -200,7 +200,7 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Example__
@@ -225,24 +225,24 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
-| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectPrefix` |_string_ | Prefix of objects to be listed |
+|`recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjects iterator. |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: |
+|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: |
|Param |Type |Description |
|:---|:---| :---|
-|`objectInfo.Key` | _string_ |name of the object. |
-|`objectInfo.Size` | _int64_ |size of the object. |
-|`objectInfo.ETag` | _string_ |etag of the object. |
-|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objectInfo.Key` | _string_ |Name of the object |
+|`objectInfo.Size` | _int64_ |Size of the object |
+|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
+|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |
```go
@@ -269,17 +269,17 @@ for object := range objectCh {
<a name="ListObjectsV2"></a>
### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-Lists objects in a bucket using the recommanded listing API v2
+Lists objects in a bucket using the recommended listing API v2
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
+|`bucketName` | _string_ |Name of the bucket |
+| `objectPrefix` |_string_ | Prefix of objects to be listed |
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjectsV2 iterator. |
__Return Value__
@@ -290,10 +290,10 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`objectInfo.Key` | _string_ |name of the object. |
-|`objectInfo.Size` | _int64_ |size of the object. |
-|`objectInfo.ETag` | _string_ |etag of the object. |
-|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objectInfo.Key` | _string_ |Name of the object |
+|`objectInfo.Size` | _int64_ |Size of the object |
+|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
+|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |
```go
@@ -327,25 +327,25 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-| `prefix` |_string_ | prefix of the object names that are partially uploaded |
+|`bucketName` | _string_ |Name of the bucket |
+| `prefix` |_string_ | Prefix of objects that are partially uploaded |
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListIncompleteUploads iterator. |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |emits multipart objects of the format listed below: |
+|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`multiPartObjInfo.Key` | _string_ |name of the incomplete object. |
-|`multiPartObjInfo.UploadID` | _string_ |upload ID of the incomplete object.|
-|`multiPartObjInfo.Size` | _int64_ |size of the incompletely uploaded object.|
+|`multiPartObjInfo.Key` | _string_ |Name of incompletely uploaded object |
+|`multiPartObjInfo.UploadID` | _string_ |Upload ID of incompletely uploaded object |
+|`multiPartObjInfo.Size` | _int64_ |Size of incompletely uploaded object |
__Example__
@@ -383,8 +383,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
__Return Value__
@@ -427,9 +427,9 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`filePath` | _string_ |path to which the object data will be written to. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to download object to |
__Example__
@@ -446,7 +446,7 @@ if err != nil {
```
<a name="PutObject"></a>
-### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
+### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
Uploads an object.
@@ -456,16 +456,16 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`reader` | _io.Reader_ |Any golang object implementing io.Reader. |
-|`contentType` | _string_ |content type of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
+|`contentType` | _string_ |Content type of the object |
__Example__
-Uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than the 5MiB in size, PutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
@@ -499,10 +499,10 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`objectSource` | _string_ |name of the object source. |
-|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`].|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`objectSource` | _string_ |Name of the source object |
+|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`]|
__Example__
@@ -537,7 +537,7 @@ if err != nil {
<a name="FPutObject"></a>
### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
-Uploads contents from a file to objectName.
+Uploads contents from a file to objectName.
__Parameters__
@@ -545,16 +545,16 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`filePath` | _string_ |file path of the file to be uploaded. |
-|`contentType` | _string_ |content type of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to file to be uploaded |
+|`contentType` | _string_ |Content type of the object |
__Example__
-FPutObject uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than the 5MiB in size, FPutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
@@ -579,28 +579,28 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`objInfo` | _ObjectInfo_ |object stat info for format listed below: |
+|`objInfo` | _ObjectInfo_ |Object stat information |
|Param |Type |Description |
|:---|:---| :---|
-|`objInfo.LastModified` | _time.Time_ |modified time stamp. |
-|`objInfo.ETag` | _string_ |etag of the object.|
-|`objInfo.ContentType` | _string_ |Content-Type of the object.|
-|`objInfo.Size` | _int64_ |size of the object.|
+|`objInfo.LastModified` | _time.Time_ |Time when object was last modified |
+|`objInfo.ETag` | _string_ |MD5 checksum of the object|
+|`objInfo.ContentType` | _string_ |Content type of the object|
+|`objInfo.Size` | _int64_ |Size of the object|
__Example__
-
+
```go
objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
@@ -623,8 +623,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
```go
@@ -639,22 +639,22 @@ if err != nil {
<a name="RemoveObjects"></a>
### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError
-Removes a list of objects obtained from an input channel. The call internally buffers up `1000` at
-a time and initiates a delete request to the server. Upon any error is sent through the error channel.
+Removes a list of objects obtained from an input channel. The call sends delete requests to the server in batches of up to 1000 objects at a time.
+The errors observed are sent over the error channel.
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectsCh` | _chan string_ | write prefixes of objects to be removed |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectsCh` | _chan string_ | Channel of object names to be removed |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`errorCh` | _chan minio.RemoveObjectError | read objects deletion errors |
+|`errorCh` | _chan minio.RemoveObjectError_ | Channel of errors observed during deletion |
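+
+A minimal sketch of driving the input channel and draining the error channel (the object names are illustrative):
+
+```go
+objectsCh := make(chan string)
+go func() {
+	// Close the channel once all object names have been sent.
+	defer close(objectsCh)
+	objectsCh <- "photos/album1.zip"
+	objectsCh <- "photos/album2.zip"
+}()
+for e := range minioClient.RemoveObjects("mybucket", objectsCh) {
+	fmt.Println("Error detected during deletion:", e)
+}
+```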
@@ -679,8 +679,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
__Example__
@@ -708,10 +708,10 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`expiry` | _time.Duration_ |expiry in seconds. |
-|`reqParams` | _url.Values_ |additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
+|`reqParams` | _url.Values_ |Additional response header overrides; supports _response-expires_, _response-content-type_, _response-cache-control_, and _response-content-disposition_ |
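+
+For instance, a `reqParams` value overriding the response Content-Disposition can be built with `url.Values` (a sketch; the filename is illustrative):
+
+```go
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"photo.jpg\"")
+```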
__Example__
@@ -738,7 +738,7 @@ if err != nil {
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
NOTE: you can upload to S3 only with the specified object name.
-
+
__Parameters__
@@ -746,9 +746,9 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`expiry` | _time.Duration_ |expiry in seconds. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
__Example__
@@ -763,7 +763,7 @@ if err != nil {
fmt.Println(err)
return
}
- fmt.Println(presignedURL)
+fmt.Println(presignedURL)
```
@@ -833,9 +833,9 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket.|
-|`objectPrefix` | _string_ |name of the object prefix.|
-|`policy` | _policy.BucketPolicy_ |policy can be:|
+|`bucketName` | _string_ |Name of the bucket|
+|`objectPrefix` | _string_ |Name of the object prefix|
+|`policy` | _policy.BucketPolicy_ |Policy can be one of the following: |
| | |policy.BucketPolicyNone|
| | |policy.BucketPolicyReadOnly|
| | |policy.BucketPolicyReadWrite|
@@ -847,7 +847,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -875,8 +875,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectPrefix` | _string_ |name of the object prefix |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectPrefix` | _string_ |Prefix matching objects under the bucket |
__Return Values__
@@ -884,7 +884,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
|`bucketPolicy` | _policy.BucketPolicy_ |string that contains: `none`, `readonly`, `readwrite`, or `writeonly` |
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -910,16 +910,16 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectPrefix` | _string_ |name of the object prefix |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectPrefix` | _string_ |Prefix matching objects under the bucket |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketPolicies` | _map[string]BucketPolicy_ |map that contains object resources paths with their associated permissions |
-|`err` | _error_ |standard error |
+|`bucketPolicies` | _map[string]BucketPolicy_ |Map of object resource paths and their permissions |
+|`err` | _error_ |Standard Error |
__Example__
@@ -947,7 +947,7 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Return Values__
@@ -955,7 +955,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -963,10 +963,11 @@ __Example__
```go
bucketNotification, err := minioClient.GetBucketNotification("mybucket")
if err != nil {
- for _, topicConfig := range bucketNotification.TopicConfigs {
- for _, e := range topicConfig.Events {
- fmt.Println(e + " event is enabled")
- }
+ log.Fatalf("Failed to get bucket notification configurations for mybucket - %v", err)
+}
+for _, topicConfig := range bucketNotification.TopicConfigs {
+ for _, e := range topicConfig.Events {
+ fmt.Println(e + " event is enabled")
}
}
```
@@ -981,15 +982,15 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`bucketNotification` | _BucketNotification_ |bucket notification. |
+|`bucketName` | _string_ |Name of the bucket |
+|`bucketNotification` | _BucketNotification_ |Represents the XML to be sent to the configured web service |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -1006,7 +1007,7 @@ bucketNotification := BucketNotification{}
bucketNotification.AddTopic(topicConfig)
err := c.SetBucketNotification(bucketName, bucketNotification)
if err != nil {
- fmt.Println("Cannot set the bucket notification: " + err)
+	fmt.Println("Unable to set the bucket notification:", err)
}
```
@@ -1020,14 +1021,14 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -1035,7 +1036,7 @@ __Example__
```go
err := c.RemoveAllBucketNotification(bucketName)
if err != nil {
- fmt.Println("Cannot remove bucket notifications.")
+ fmt.Println("Unable to remove bucket notifications.", err)
}
```
@@ -1056,20 +1057,20 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ | Bucket to listen notifications from. |
-|`prefix` | _string_ | Object key prefix to filter notifications for. |
-|`suffix` | _string_ | Object key suffix to filter notifications for. |
-|`events` | _[]string_| Enables notifications for specific event types. |
-|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification loop. |
+|`bucketName` | _string_ | Bucket to listen notifications on |
+|`prefix` | _string_ | Object key prefix to filter notifications for |
+|`suffix` | _string_ | Object key suffix to filter notifications for |
+|`events` | _[]string_| Enables notifications for specific event types |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`chan NotificationInfo` | _chan_ | Read channel for all notificatons on bucket. |
-|`NotificationInfo` | _object_ | Notification object represents events info. |
-|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events. |
-|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation. |
+|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket |
+|`NotificationInfo` | _object_ | Notification object representing event information |
+|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
+|`notificationInfo.Err` | _error_ | Carries any error that occurred during the operation |
__Example__
@@ -1085,17 +1086,69 @@ defer close(doneCh)
// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
- "s3:ObjectCreated:*",
- "s3:ObjectRemoved:*",
-}, doneCh) {
- if notificationInfo.Err != nil {
- log.Fatalln(notificationInfo.Err)
- }
- log.Println(notificationInfo)
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*",
+ }, doneCh) {
+ if notificationInfo.Err != nil {
+ log.Fatalln(notificationInfo.Err)
+ }
+ log.Println(notificationInfo)
}
```
-## 6. Explore Further
+## 6. Client custom settings
-- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
+<a name="SetAppInfo"></a>
+### SetAppInfo(appName string, appVersion string)
+Adds application details to User-Agent.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`appName` | _string_ | Name of the application performing the API requests. |
+| `appVersion`| _string_ | Version of the application performing the API requests. |
+
+__Example__
+
+
+```go
+
+// Set Application name and version to be used in subsequent API requests.
+minioClient.SetAppInfo("myCloudApp", "1.0.0")
+
+```
+
+<a name="SetCustomTransport"></a>
+### SetCustomTransport(customHTTPTransport http.RoundTripper)
+Overrides default HTTP transport. This is usually needed for debugging
+or for adding custom TLS certificates.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`customHTTPTransport` | _http.RoundTripper_ | Custom transport, e.g., to trace API requests and responses for debugging purposes.|
+
+
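+__Example__
+
+A sketch of wiring in a custom transport (the transport settings shown are illustrative; `http.Transport` implements `http.RoundTripper`):
+
+```go
+// Custom transport with an explicit TLS handshake timeout.
+tr := &http.Transport{
+	TLSHandshakeTimeout: 10 * time.Second,
+}
+minioClient.SetCustomTransport(tr)
+```
+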
+<a name="TraceOn"></a>
+### TraceOn(outputStream io.Writer)
+Enables HTTP tracing. The trace is written to the io.Writer
+provided. If outputStream is nil, trace is written to os.Stdout.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`outputStream` | _io.Writer_ | HTTP trace is written into outputStream.|
+
+
+<a name="TraceOff"></a>
+### TraceOff()
+Disables HTTP tracing.
+
+
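+__Example__
+
+A sketch of toggling tracing around a set of calls (assumes an initialized `minioClient`):
+
+```go
+// Write the HTTP trace of subsequent calls to standard output.
+minioClient.TraceOn(os.Stdout)
+
+// ... perform the API calls to be traced ...
+
+// Turn tracing back off.
+minioClient.TraceOff()
+```
+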
+## 7. Explore Further
+
+- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
index f618059cf..cbb889d8d 100644
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
@@ -34,7 +34,7 @@ const (
BucketPolicyWriteOnly = "writeonly"
)
-// isValidBucketPolicy - Is provided policy value supported.
+// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
func (p BucketPolicy) IsValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
@@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
return readOnly, writeOnly
}
-// Returns policy of given bucket name, prefix in given statements.
+// GetPolicy - Returns the policy of the given bucket name and prefix from the given statements.
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
@@ -563,7 +563,7 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP
return policy
}
-// GetPolicies returns a map of policies rules of given bucket name, prefix in given statements.
+// GetPolicies - returns a map of policy rules for the given bucket name from the given statements.
func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
policyRules := map[string]BucketPolicy{}
objResources := set.NewStringSet()
@@ -590,8 +590,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol
return policyRules
}
-// Returns new statements containing policy of given bucket name and
-// prefix are appended.
+// SetPolicy - Returns new statements with the policy for the given bucket name and prefix appended.
func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
out := removeStatements(statements, bucketName, prefix)
// fmt.Println("out = ")
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
index b9f248253..e1ec6c02c 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v2.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"bytes"
@@ -29,6 +29,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -45,22 +47,22 @@ func encodeURL2Path(u *url.URL) (path string) {
bucketName := hostSplits[0]
path = "/" + bucketName
path += u.Path
- path = urlEncodePath(path)
+ path = s3utils.EncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
- path = urlEncodePath(path)
+ path = s3utils.EncodePath(path)
return
}
- path = urlEncodePath(u.Path)
+ path = s3utils.EncodePath(u.Path)
return
}
-// preSignV2 - presign the request in following style.
+// PreSignV2 - presigns the request in the following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -95,18 +97,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
- req.URL.RawQuery = queryEncode(query)
+ req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
- req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
}
-// postPresignSignatureV2 - presigned signature for PostPolicy
+// PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
-func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
@@ -129,8 +131,8 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
//
// CanonicalizedProtocolHeaders = <described below>
-// signV2 sign the request before Do() (AWS Signature Version 2).
-func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+// SignV2 signs the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -287,7 +289,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Get encoded URL path.
if len(requestURL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
- query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
+ query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
path = path + "?" + query
}
buf.WriteString(path)
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
index 6d861fb81..3c0e0ecea 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v2_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"sort"
diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
index 2be3808d6..3322b67cc 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"bytes"
@@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -101,8 +103,8 @@ func getScope(location string, t time.Time) string {
return scope
}
-// getCredential generate a credential string.
-func getCredential(accessKeyID, location string, t time.Time) string {
+// GetCredential generates a credential string.
+func GetCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(location, t)
return accessKeyID + "/" + scope
}
@@ -185,7 +187,7 @@ func getCanonicalRequest(req http.Request) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
- urlEncodePath(req.URL.Path),
+ s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
getCanonicalHeaders(req),
getSignedHeaders(req),
@@ -202,9 +204,9 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
return stringToSign
}
-// preSignV4 presign the request, in accordance with
+// PreSignV4 presigns the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -214,7 +216,7 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
t := time.Now().UTC()
// Get credential string.
- credential := getCredential(accessKeyID, location, t)
+ credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
@@ -246,9 +248,9 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
return &req
}
-// postPresignSignatureV4 - presigned signature for PostPolicy
+// PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
-func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signining key.
signingkey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
@@ -256,9 +258,9 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
return signature
}
-// signV4 sign the request before Do(), in accordance with
+// SignV4 signs the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -280,7 +282,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
signingKey := getSigningKey(secretAccessKey, location, t)
// Get credential string.
- credential := getCredential(accessKeyID, location, t)
+ credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
new file mode 100644
index 000000000..6f5ba1895
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
@@ -0,0 +1,70 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "net/http"
+ "strings"
+ "testing"
+)
+
+// Tests signature calculation.
+func TestSignatureCalculation(t *testing.T) {
+ req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ req = SignV4(*req, "", "", "us-east-1")
+ if req.Header.Get("Authorization") != "" {
+ t.Fatal("Error: anonymous credentials should not have Authorization header.")
+ }
+
+ req = PreSignV4(*req, "", "", "us-east-1", 0)
+ if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
+ t.Fatal("Error: anonymous credentials should not have Signature query resource.")
+ }
+
+ req = SignV2(*req, "", "")
+ if req.Header.Get("Authorization") != "" {
+ t.Fatal("Error: anonymous credentials should not have Authorization header.")
+ }
+
+ req = PreSignV2(*req, "", "", 0)
+ if strings.Contains(req.URL.RawQuery, "Signature") {
+ t.Fatal("Error: anonymous credentials should not have Signature query resource.")
+ }
+
+ req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
+ if req.Header.Get("Authorization") == "" {
+ t.Fatal("Error: normal credentials should have Authorization header.")
+ }
+
+ req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
+ if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
+ t.Fatal("Error: normal credentials should have Signature query resource.")
+ }
+
+ req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
+ if req.Header.Get("Authorization") == "" {
+ t.Fatal("Error: normal credentials should have Authorization header.")
+ }
+
+ req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
+ if !strings.Contains(req.URL.RawQuery, "Signature") {
+		t.Fatal("Error: normal credentials should have Signature query resource.")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
new file mode 100644
index 000000000..0619b3082
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -0,0 +1,39 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculates the sha256 sum of an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculates the hmac between two input byte arrays.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
new file mode 100644
index 000000000..b266e42a1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
@@ -0,0 +1,66 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "fmt"
+ "net/url"
+ "testing"
+)
+
+// Tests url encoding.
+func TestEncodeURL2Path(t *testing.T) {
+ type urlStrings struct {
+ objName string
+ encodedObjName string
+ }
+
+ bucketName := "bucketName"
+ want := []urlStrings{
+ {
+ objName: "本語",
+ encodedObjName: "%E6%9C%AC%E8%AA%9E",
+ },
+ {
+ objName: "本語.1",
+ encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
+ },
+ {
+ objName: ">123>3123123",
+ encodedObjName: "%3E123%3E3123123",
+ },
+ {
+ objName: "test 1 2.txt",
+ encodedObjName: "test%201%202.txt",
+ },
+ {
+ objName: "test++ 1.txt",
+ encodedObjName: "test%2B%2B%201.txt",
+ },
+ }
+
+ for _, o := range want {
+ u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ urlPath := "/" + bucketName + "/" + o.encodedObjName
+ if urlPath != encodeURL2Path(u) {
+ t.Fatal("Error")
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
new file mode 100644
index 000000000..ae1cea337
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -0,0 +1,195 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+ return false
+ }
+ // No need to regexp match, since the list is non-exhaustive.
+	// We let it pass and fail later.
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if IsAmazonChinaEndpoint(endpointURL) {
+ return true
+ }
+
+ if IsAmazonS3AccelerateEndpoint(endpointURL) {
+ return true
+ }
+
+ return endpointURL.Host == "s3.amazonaws.com"
+}
+
+// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
+// Customers who wish to use the new Beijing Region are required
+// to sign up for a separate set of account credentials unique to
+// the China (Beijing) Region. Customers with existing AWS credentials
+// will not be able to access resources in the new Region, and vice versa.
+// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
+func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
+}
+
+// IsAmazonS3AccelerateEndpoint - Match if it is an Amazon S3 Accelerate endpoint.
+func IsAmazonS3AccelerateEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3-accelerate.amazonaws.com"
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "storage.googleapis.com"
+}
+
+// Expects ASCII encoded strings - from output of EncodePath.
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
+
+// QueryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by EncodePath() used
+// here, it also percent encodes '/' (forward slash).
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// If the object name matches this reserved pattern, there is no need to encode it.
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encodes strings from their UTF-8 byte representations to percent-encoded hex escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ len := utf8.RuneLen(s)
+ if len < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, len)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
new file mode 100644
index 000000000..f790861cd
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
@@ -0,0 +1,284 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "net/url"
+ "testing"
+)
+
+// Tests for 'isValidDomain(host string) bool'.
+func TestIsValidDomain(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ host string
+ // Expected result.
+ result bool
+ }{
+ {"s3.amazonaws.com", true},
+ {"s3.cn-north-1.amazonaws.com.cn", true},
+ {"s3.amazonaws.com_", false},
+ {"%$$$", false},
+ {"s3.amz.test.com", true},
+ {"s3.%%", false},
+ {"localhost", true},
+ {"-localhost", false},
+ {"", false},
+ {"\n \t", false},
+ {" ", false},
+ }
+
+ for i, testCase := range testCases {
+ result := IsValidDomain(testCase.host)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate IP address validator.
+func TestIsValidIP(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ ip string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", true},
+ {"192.168.1", false},
+ {"192.168.1.1.1", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ }
+
+ for i, testCase := range testCases {
+ result := IsValidIP(testCase.ip)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
+ }
+ }
+
+}
+
+// Tests validate virtual host validator.
+func TestIsVirtualHostSupported(t *testing.T) {
+ testCases := []struct {
+ url string
+ bucket string
+		// Expected result.
+ result bool
+ }{
+ {"https://s3.amazonaws.com", "my-bucket", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
+ {"https://s3.amazonaws.com", "my-bucket.", false},
+ {"https://amazons3.amazonaws.com", "my-bucket.", false},
+ {"https://storage.googleapis.com/", "my-bucket", true},
+ {"https://mystorage.googleapis.com/", "my-bucket", false},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsVirtualHostSupported(*u, testCase.bucket)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
+ }
+ }
+}
+
+// Tests validate Amazon endpoint validator.
+func TestIsAmazonEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"https://s3.amazonaws.com", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsAmazonEndpoint(*u)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Amazon S3 China endpoint validator.
+func TestIsAmazonChinaEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // s3.amazonaws.com is not a valid Amazon S3 China end point.
+ {"https://s3.amazonaws.com", false},
+ // valid input.
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsAmazonChinaEndpoint(*u)
+ if testCase.result != result {
+			t.Errorf("Test %d: Expected isAmazonChinaEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Google Cloud end point validator.
+func TestIsGoogleEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", false},
+ {"https://192.168.1.1", false},
+ {"s3.amazonaws.com", false},
+ {"http://s3.amazonaws.com", false},
+ {"https://s3.amazonaws.com", false},
+ {"https://s3.cn-north-1.amazonaws.com.cn", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"http://storage.googleapis.com", true},
+ {"https://storage.googleapis.com", true},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsGoogleEndpoint(*u)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+func TestPercentEncodeSlash(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {"test123", "test123"},
+ {"abc,+_1", "abc,+_1"},
+ {"%40prefix=test%40123", "%40prefix=test%40123"},
+ {"key1=val1/val2", "key1=val1%2Fval2"},
+ {"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
+ }
+
+ for i, testCase := range testCases {
+ receivedOutput := percentEncodeSlash(testCase.input)
+ if testCase.output != receivedOutput {
+ t.Errorf(
+ "Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
+ i+1, testCase.input, testCase.output,
+ receivedOutput,
+ )
+
+ }
+ }
+}
+
+// Tests validate the query encoder.
+func TestQueryEncode(t *testing.T) {
+ testCases := []struct {
+ queryKey string
+ valueToEncode []string
+ // Expected result.
+ result string
+ }{
+ {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
+ {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
+ {"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
+ {"prefix", []string{"test#123"}, "prefix=test%23123"},
+ {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
+ {"prefix", []string{"test123"}, "prefix=test123"},
+ {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
+ }
+
+ for i, testCase := range testCases {
+ urlValues := make(url.Values)
+ for _, valueToEncode := range testCase.valueToEncode {
+ urlValues.Add(testCase.queryKey, valueToEncode)
+ }
+ result := QueryEncode(urlValues)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate the URL path encoder.
+func TestEncodePath(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ inputStr string
+ // Expected result.
+ result string
+ }{
+ {"thisisthe%url", "thisisthe%25url"},
+ {"本語", "%E6%9C%AC%E8%AA%9E"},
+ {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
+ {">123", "%3E123"},
+ {"myurl#link", "myurl%23link"},
+ {"space in url", "space%20in%20url"},
+ {"url+path", "url%2Bpath"},
+ }
+
+ for i, testCase := range testCases {
+ result := EncodePath(testCase.inputStr)
+ if testCase.result != result {
+			t.Errorf("Test %d: Expected EncodePath result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
index 2a675d770..5e716124a 100644
--- a/vendor/github.com/minio/minio-go/post-policy.go
+++ b/vendor/github.com/minio/minio-go/post-policy.go
@@ -149,6 +149,24 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
return nil
}
+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+	if strings.TrimSpace(status) == "" {
+ return ErrInvalidArgument("Status is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_status",
+ value: status,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_status"] = status
+ return nil
+}
+
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
new file mode 100644
index 000000000..e300af69c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry-continous.go
@@ -0,0 +1,52 @@
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // 1<<uint(attempt) below could overflow, so limit the value of attempt
+ maxAttempt := 30
+ if attempt > maxAttempt {
+ attempt = maxAttempt
+ }
+ //sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ var nextBackoff int
+ for {
+ select {
+ // Attempts starts.
+ case attemptCh <- nextBackoff:
+ nextBackoff++
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(nextBackoff))
+ }
+ }()
+ return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
index 3f159bd9d..d7fa5e038 100644
--- a/vendor/github.com/minio/minio-go/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -20,9 +20,12 @@ package minio
// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
+ "us-east-2": "s3-us-east-2.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
+ "ca-central-1": "s3.ca-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
+ "eu-west-2": "s3-eu-west-2.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
"ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
diff --git a/vendor/github.com/minio/minio-go/test-utils_test.go b/vendor/github.com/minio/minio-go/test-utils_test.go
index 179c28a23..4134af996 100644
--- a/vendor/github.com/minio/minio-go/test-utils_test.go
+++ b/vendor/github.com/minio/minio-go/test-utils_test.go
@@ -21,6 +21,7 @@ import (
"encoding/xml"
"io/ioutil"
"net/http"
+ "strconv"
)
// Contains common used utilities for tests.
@@ -62,3 +63,12 @@ func encodeResponse(response interface{}) []byte {
encode.Encode(response)
return bytesBuffer.Bytes()
}
+
+// Convert string to bool, returning true if parsing fails for any reason.
+func mustParseBool(str string) bool {
+ b, err := strconv.ParseBool(str)
+ if err != nil {
+ return true
+ }
+ return b
+}
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 2208d3603..93cd1712f 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -17,11 +17,8 @@
package minio
import (
- "bytes"
- "crypto/hmac"
"crypto/md5"
"crypto/sha256"
- "encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
@@ -29,10 +26,11 @@ import (
"net/http"
"net/url"
"regexp"
- "sort"
"strings"
"time"
"unicode/utf8"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// xmlDecoder provide decoded value in xml.
@@ -55,13 +53,6 @@ func sumMD5(data []byte) []byte {
return hash.Sum(nil)
}
-// sumHMAC calculate hmac between two input byte array.
-func sumHMAC(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
@@ -69,12 +60,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if err != nil {
return nil, err
}
- if !isValidIP(host) && !isValidDomain(host) {
+ if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
- if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+ if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
@@ -93,45 +84,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
}
// Validate incoming endpoint URL.
- if err := isValidEndpointURL(endpointURL.String()); err != nil {
+ if err := isValidEndpointURL(*endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}
-// isValidDomain validates if input string is a valid domain name.
-func isValidDomain(host string) bool {
- // See RFC 1035, RFC 3696.
- host = strings.TrimSpace(host)
- if len(host) == 0 || len(host) > 255 {
- return false
- }
- // host cannot start or end with "-"
- if host[len(host)-1:] == "-" || host[:1] == "-" {
- return false
- }
- // host cannot start or end with "_"
- if host[len(host)-1:] == "_" || host[:1] == "_" {
- return false
- }
- // host cannot start or end with a "."
- if host[len(host)-1:] == "." || host[:1] == "." {
- return false
- }
- // All non alphanumeric characters are invalid.
- if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
- return false
- }
- // No need to regexp match, since the list is non-exhaustive.
- // We let it valid and fail later.
- return true
-}
-
-// isValidIP parses input string for ip address validity.
-func isValidIP(ip string) bool {
- return net.ParseIP(ip) != nil
-}
-
// closeResponse close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
@@ -152,92 +110,24 @@ func closeResponse(resp *http.Response) {
}
}
-// isVirtualHostSupported - verifies if bucketName can be part of
-// virtual host. Currently only Amazon S3 and Google Cloud Storage
-// would support this.
-func isVirtualHostSupported(endpointURL string, bucketName string) bool {
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- // bucketName can be valid but '.' in the hostname will fail SSL
- // certificate validation. So do not use host-style for such buckets.
- if url.Scheme == "https" && strings.Contains(bucketName, ".") {
- return false
- }
- // Return true for all other cases
- return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
-}
-
-// Match if it is exactly Amazon S3 endpoint.
-func isAmazonEndpoint(endpointURL string) bool {
- if isAmazonChinaEndpoint(endpointURL) {
- return true
- }
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- if url.Host == "s3.amazonaws.com" {
- return true
- }
- return false
-}
-
-// Match if it is exactly Amazon S3 China endpoint.
-// Customers who wish to use the new Beijing Region are required
-// to sign up for a separate set of account credentials unique to
-// the China (Beijing) Region. Customers with existing AWS credentials
-// will not be able to access resources in the new Region, and vice versa.
-// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
-func isAmazonChinaEndpoint(endpointURL string) bool {
- if endpointURL == "" {
- return false
- }
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- if url.Host == "s3.cn-north-1.amazonaws.com.cn" {
- return true
- }
- return false
-}
-
-// Match if it is exactly Google cloud storage endpoint.
-func isGoogleEndpoint(endpointURL string) bool {
- if endpointURL == "" {
- return false
- }
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- if url.Host == "storage.googleapis.com" {
- return true
- }
- return false
-}
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
// Verify if input endpoint URL is valid.
-func isValidEndpointURL(endpointURL string) error {
- if endpointURL == "" {
+func isValidEndpointURL(endpointURL url.URL) error {
+ if endpointURL == sentinelURL {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
- url, err := url.Parse(endpointURL)
- if err != nil {
+ if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
- if url.Path != "/" && url.Path != "" {
- return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
- }
- if strings.Contains(endpointURL, ".amazonaws.com") {
- if !isAmazonEndpoint(endpointURL) {
+ if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+ if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
- if strings.Contains(endpointURL, ".googleapis.com") {
- if !isGoogleEndpoint(endpointURL) {
+ if strings.Contains(endpointURL.Host, ".googleapis.com") {
+ if !s3utils.IsGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
@@ -260,6 +150,9 @@ func isValidExpiry(expires time.Duration) error {
// style requests instead for such buckets.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+// Invalid bucket name with double dot.
+var invalidDotBucketName = regexp.MustCompile(`\.\.`)
+
// isValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func isValidBucketName(bucketName string) error {
@@ -275,7 +168,7 @@ func isValidBucketName(bucketName string) error {
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
- if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
+ if invalidDotBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
}
if !validBucketName.MatchString(bucketName) {
@@ -310,74 +203,25 @@ func isValidObjectPrefix(objectPrefix string) error {
return nil
}
-//expects ascii encoded strings - from output of urlEncodePath
-func percentEncodeSlash(s string) string {
- return strings.Replace(s, "/", "%2F", -1)
-}
-
-// queryEncode - encodes query values in their URL encoded form. In
-// addition to the percent encoding performed by urlEncodePath() used
-// here, it also percent encodes '/' (forward slash)
-func queryEncode(v url.Values) string {
- if v == nil {
- return ""
- }
- var buf bytes.Buffer
- keys := make([]string, 0, len(v))
- for k := range v {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- vs := v[k]
- prefix := percentEncodeSlash(urlEncodePath(k)) + "="
- for _, v := range vs {
- if buf.Len() > 0 {
- buf.WriteByte('&')
- }
- buf.WriteString(prefix)
- buf.WriteString(percentEncodeSlash(urlEncodePath(v)))
- }
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
}
- return buf.String()
+ return h2
}
-// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func urlEncodePath(pathName string) string {
- // if object matches reserved string, no need to encode them
- reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
- if reservedNames.MatchString(pathName) {
- return pathName
- }
- var encodedPathname string
- for _, s := range pathName {
- if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- }
- switch s {
- case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- default:
- len := utf8.RuneLen(s)
- if len < 0 {
- // if utf8 cannot convert return the same string as is
- return pathName
- }
- u := make([]byte, len)
- utf8.EncodeRune(u, s)
- for _, r := range u {
- hex := hex.EncodeToString([]byte{r})
- encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
- }
- }
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of header keys to filter out and returns
+// the remaining headers as a new http.Header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+ filteredHeader = cloneHeader(header)
+ for _, key := range filterKeys {
+ filteredHeader.Del(key)
}
- return encodedPathname
+ return filteredHeader
}
diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go
index 1a30d5441..99bdea329 100644
--- a/vendor/github.com/minio/minio-go/utils_test.go
+++ b/vendor/github.com/minio/minio-go/utils_test.go
@@ -17,11 +17,27 @@ package minio
import (
"fmt"
+ "net/http"
"net/url"
"testing"
"time"
)
+// Tests filter header function by filtering out
+// some custom header keys.
+func TestFilterHeader(t *testing.T) {
+ header := http.Header{}
+ header.Set("Content-Type", "binary/octet-stream")
+ header.Set("Content-Encoding", "gzip")
+ newHeader := filterHeader(header, []string{"Content-Type"})
+ if len(newHeader) > 1 {
+ t.Fatalf("Unexpected size of the returned header, should be 1, got %d", len(newHeader))
+ }
+ if newHeader.Get("Content-Encoding") != "gzip" {
+ t.Fatalf("Unexpected content-encoding value, expected 'gzip', got %s", newHeader.Get("Content-Encoding"))
+ }
+}
+
// Tests for 'getEndpointURL(endpoint string, inSecure bool)'.
func TestGetEndpointURL(t *testing.T) {
testCases := []struct {
@@ -74,35 +90,6 @@ func TestGetEndpointURL(t *testing.T) {
}
}
-// Tests for 'isValidDomain(host string) bool'.
-func TestIsValidDomain(t *testing.T) {
- testCases := []struct {
- // Input.
- host string
- // Expected result.
- result bool
- }{
- {"s3.amazonaws.com", true},
- {"s3.cn-north-1.amazonaws.com.cn", true},
- {"s3.amazonaws.com_", false},
- {"%$$$", false},
- {"s3.amz.test.com", true},
- {"s3.%%", false},
- {"localhost", true},
- {"-localhost", false},
- {"", false},
- {"\n \t", false},
- {" ", false},
- }
-
- for i, testCase := range testCases {
- result := isValidDomain(testCase.host)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
- }
- }
-}
-
// Tests validate end point validator.
func TestIsValidEndpointURL(t *testing.T) {
testCases := []struct {
@@ -125,161 +112,33 @@ func TestIsValidEndpointURL(t *testing.T) {
}
for i, testCase := range testCases {
- err := isValidEndpointURL(testCase.url)
+ var u url.URL
+ if testCase.url == "" {
+ u = sentinelURL
+ } else {
+ u1, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ u = *u1
+ }
+ err := isValidEndpointURL(u)
if err != nil && testCase.shouldPass {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
if err == nil && !testCase.shouldPass {
- t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err)
}
// Failed as expected, but does it fail for the expected reason?
if err != nil && !testCase.shouldPass {
if err.Error() != testCase.err.Error() {
- t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err, err)
}
}
}
}
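
[Editor's sketch, not part of the vendored diff] The updated loop above captures the new calling convention: isValidEndpointURL now takes a url.URL value rather than a raw string, with the package-level sentinelURL zero value standing in for an empty endpoint. A hedged sketch of a caller, assuming code inside package minio with "net/url" imported; validateEndpointSketch is a hypothetical name:

// validateEndpointSketch parses and validates an endpoint string the
// same way the test does: an empty string maps to sentinelURL.
func validateEndpointSketch(endpoint string) error {
	u := sentinelURL
	if endpoint != "" {
		parsed, err := url.Parse(endpoint)
		if err != nil {
			return err
		}
		u = *parsed
	}
	return isValidEndpointURL(u)
}
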
-// Tests validate IP address validator.
-func TestIsValidIP(t *testing.T) {
- testCases := []struct {
- // Input.
- ip string
- // Expected result.
- result bool
- }{
- {"192.168.1.1", true},
- {"192.168.1", false},
- {"192.168.1.1.1", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- }
-
- for i, testCase := range testCases {
- result := isValidIP(testCase.ip)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
- }
- }
-
-}
-
-// Tests validate virtual host validator.
-func TestIsVirtualHostSupported(t *testing.T) {
- testCases := []struct {
- url string
- bucket string
- // Expected result.
- result bool
- }{
- {"https://s3.amazonaws.com", "my-bucket", true},
- {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
- {"https://s3.amazonaws.com", "my-bucket.", false},
- {"https://amazons3.amazonaws.com", "my-bucket.", false},
- {"https://storage.googleapis.com/", "my-bucket", true},
- {"https://mystorage.googleapis.com/", "my-bucket", false},
- }
-
- for i, testCase := range testCases {
- result := isVirtualHostSupported(testCase.url, testCase.bucket)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
- }
- }
-}
-
-// Tests validate Amazon endpoint validator.
-func TestIsAmazonEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"https://192.168.1.1", false},
- {"192.168.1.1", false},
- {"http://storage.googleapis.com", false},
- {"https://storage.googleapis.com", false},
- {"storage.googleapis.com", false},
- {"s3.amazonaws.com", false},
- {"https://amazons3.amazonaws.com", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // valid inputs.
- {"https://s3.amazonaws.com", true},
- {"https://s3.cn-north-1.amazonaws.com.cn", true},
- }
-
- for i, testCase := range testCases {
- result := isAmazonEndpoint(testCase.url)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
-// Tests validate Amazon S3 China endpoint validator.
-func TestIsAmazonChinaEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"https://192.168.1.1", false},
- {"192.168.1.1", false},
- {"http://storage.googleapis.com", false},
- {"https://storage.googleapis.com", false},
- {"storage.googleapis.com", false},
- {"s3.amazonaws.com", false},
- {"https://amazons3.amazonaws.com", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // s3.amazonaws.com is not a valid Amazon S3 China end point.
- {"https://s3.amazonaws.com", false},
- // valid input.
- {"https://s3.cn-north-1.amazonaws.com.cn", true},
- }
-
- for i, testCase := range testCases {
- result := isAmazonChinaEndpoint(testCase.url)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
-// Tests validate Google Cloud end point validator.
-func TestIsGoogleEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"192.168.1.1", false},
- {"https://192.168.1.1", false},
- {"s3.amazonaws.com", false},
- {"http://s3.amazonaws.com", false},
- {"https://s3.amazonaws.com", false},
- {"https://s3.cn-north-1.amazonaws.com.cn", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // valid inputs.
- {"http://storage.googleapis.com", true},
- {"https://storage.googleapis.com", true},
- }
-
- for i, testCase := range testCases {
- result := isGoogleEndpoint(testCase.url)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
// Tests validate the expiry time validator.
func TestIsValidExpiry(t *testing.T) {
testCases := []struct {
@@ -355,82 +214,3 @@ func TestIsValidBucketName(t *testing.T) {
}
}
-
-func TestPercentEncodeSlash(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {"test123", "test123"},
- {"abc,+_1", "abc,+_1"},
- {"%40prefix=test%40123", "%40prefix=test%40123"},
- {"key1=val1/val2", "key1=val1%2Fval2"},
- {"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
- }
-
- for i, testCase := range testCases {
- receivedOutput := percentEncodeSlash(testCase.input)
- if testCase.output != receivedOutput {
- t.Errorf(
- "Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
- i+1, testCase.input, testCase.output,
- receivedOutput,
- )
-
- }
- }
-}
-
-// Tests validate the query encoder.
-func TestQueryEncode(t *testing.T) {
- testCases := []struct {
- queryKey string
- valueToEncode []string
- // Expected result.
- result string
- }{
- {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
- {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
- {"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
- {"prefix", []string{"test#123"}, "prefix=test%23123"},
- {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
- {"prefix", []string{"test123"}, "prefix=test123"},
- {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
- }
-
- for i, testCase := range testCases {
- urlValues := make(url.Values)
- for _, valueToEncode := range testCase.valueToEncode {
- urlValues.Add(testCase.queryKey, valueToEncode)
- }
- result := queryEncode(urlValues)
- if testCase.result != result {
- t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
- }
- }
-}
-
-// Tests validate the URL path encoder.
-func TestUrlEncodePath(t *testing.T) {
- testCases := []struct {
- // Input.
- inputStr string
- // Expected result.
- result string
- }{
- {"thisisthe%url", "thisisthe%25url"},
- {"本語", "%E6%9C%AC%E8%AA%9E"},
- {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
- {">123", "%3E123"},
- {"myurl#link", "myurl%23link"},
- {"space in url", "space%20in%20url"},
- {"url+path", "url%2Bpath"},
- }
-
- for i, testCase := range testCases {
- result := urlEncodePath(testCase.inputStr)
- if testCase.result != result {
- t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
- }
- }
-}
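
[Editor's note, not part of the vendored diff] The removed TestQueryEncode vectors pin down the old encoder's contract: keys sorted, '@' escaped to %40, '#' to %23, '/' to %2F, and UTF-8 runes percent-encoded byte by byte. For readers who relied on these helpers, the standard library comes close: url.Values.Encode already sorts keys and escapes '/' as %2F, differing only in encoding spaces as '+' where the removed urlEncodePath produced %20. A hedged sketch that accounts for that difference, assuming "net/url" and "strings" are imported; this illustrates the removed behavior and is not the replacement this commit actually vendors:

// queryEncodeSketch approximates the removed queryEncode using the
// standard library. Any '+' remaining after Encode is an escaped
// space (a literal '+' becomes %2B), so the rewrite below is safe.
func queryEncodeSketch(v url.Values) string {
	return strings.Replace(v.Encode(), "+", "%20", -1)
}
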