Diffstat (limited to 'vendor/github.com/minio/minio-go')
-rw-r--r--  vendor/github.com/minio/minio-go/api-compose-object.go  | 32
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket.go      |  6
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-copy.go | 28
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object.go      | 24
-rw-r--r--  vendor/github.com/minio/minio-go/api-stat.go            |  3
-rw-r--r--  vendor/github.com/minio/minio-go/api.go                 |  2
-rw-r--r--  vendor/github.com/minio/minio-go/constants.go           |  7
-rw-r--r--  vendor/github.com/minio/minio-go/core.go                |  2
-rw-r--r--  vendor/github.com/minio/minio-go/functional_tests.go    | 49
-rw-r--r--  vendor/github.com/minio/minio-go/retry.go               |  1
-rw-r--r--  vendor/github.com/minio/minio-go/utils.go               |  1
11 files changed, 109 insertions(+), 46 deletions(-)
diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
index 99b2adae8..5d3ede70d 100644
--- a/vendor/github.com/minio/minio-go/api-compose-object.go
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
+ * Copyright 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,6 +20,8 @@ package minio
import (
"context"
"fmt"
+ "io"
+ "io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -343,11 +345,12 @@ func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID str
return p, nil
}
-// ComposeObject - creates an object using server-side copying of
+// ComposeObjectWithProgress - creates an object using server-side copying of
// existing objects. It takes a list of source objects (with optional
// offsets) and concatenates them into a new object using only
-// server-side copying operations.
-func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
+// server-side copying operations. Optionally takes progress reader hook
+// for applications to look at current progress.
+func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return ErrInvalidArgument("There must be as least one and up to 10000 source objects.")
}
@@ -421,7 +424,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
// involved, it is being copied wholly and at most 5GiB in
// size, emptyfiles are also supported).
if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
- return c.CopyObject(dst, srcs[0])
+ return c.CopyObjectWithProgress(dst, srcs[0], progress)
}
// Now, handle multipart-copy cases.
@@ -476,6 +479,9 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
if err != nil {
return err
}
+ if progress != nil {
+ io.CopyN(ioutil.Discard, progress, start+end-1)
+ }
objParts = append(objParts, complPart)
partIndex++
}
@@ -490,10 +496,20 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
return nil
}
-// partsRequired is ceiling(size / copyPartSize)
+// ComposeObject - creates an object using server-side copying of
+// existing objects. It takes a list of source objects (with optional
+// offsets) and concatenates them into a new object using only
+// server-side copying operations.
+func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
+ return c.ComposeObjectWithProgress(dst, srcs, nil)
+}
+
+// partsRequired is maximum parts possible with
+// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1))
func partsRequired(size int64) int64 {
- r := size / copyPartSize
- if size%copyPartSize > 0 {
+ maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
+ r := size / int64(maxPartSize)
+ if size%int64(maxPartSize) > 0 {
r++
}
return r
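The hunks above turn ComposeObject into a thin wrapper over a new progress-aware ComposeObjectWithProgress, and derive the copy part size from maxMultipartPutObjectSize/(maxPartsCount-1) (roughly 525 MiB, assuming the usual 5 TiB and 10000-part constants) instead of the removed fixed 5 GiB copyPartSize. A minimal sketch of calling the new API follows; the byteCounter type, endpoint, credentials, and bucket/object names are illustrative and not part of this change. The progress argument is an io.Reader that the client drains by the number of bytes copied, so summing what Read hands out tracks progress.

package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

// byteCounter is an illustrative progress hook: the client drains bytes
// from it as each server-side copy completes, so the running total it
// serves from Read tracks how much of the object has been copied.
type byteCounter struct{ total int64 }

func (b *byteCounter) Read(p []byte) (int, error) {
	b.total += int64(len(p))
	fmt.Printf("\rcopied %d bytes", b.total)
	return len(p), nil
}

func main() {
	// Endpoint and credentials are placeholders.
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	srcs := []minio.SourceInfo{
		minio.NewSourceInfo("srcbucket", "part-1", nil),
		minio.NewSourceInfo("srcbucket", "part-2", nil),
	}
	dst, err := minio.NewDestinationInfo("dstbucket", "joined-object", nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	if err := client.ComposeObjectWithProgress(dst, srcs, &byteCounter{}); err != nil {
		log.Fatalln(err)
	}
}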
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index 8920ac742..cb9d8f27a 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -99,13 +99,17 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
}
// SetBucketPolicy set the access permissions on an existing bucket.
-//
func (c Client) SetBucketPolicy(bucketName, policy string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
+ // If policy is empty then delete the bucket policy.
+ if policy == "" {
+ return c.removeBucketPolicy(bucketName)
+ }
+
// Save the updated policies.
return c.putBucketPolicy(bucketName, policy)
}
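With the hunk above, SetBucketPolicy treats an empty policy string as a request to delete the bucket policy rather than sending it to the server. A short sketch of the new behavior; the bucket name and policy JSON are illustrative, and client is constructed as in the earlier sketch.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

// setAndClearPolicy demonstrates the empty-policy behavior added above.
func setAndClearPolicy(client *minio.Client, bucket string) {
	readOnly := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucket + `"]}]}`
	if err := client.SetBucketPolicy(bucket, readOnly); err != nil {
		log.Fatalln(err)
	}
	// An empty policy string now removes the bucket policy instead of
	// issuing a PutBucketPolicy call with an empty body.
	if err := client.SetBucketPolicy(bucket, ""); err != nil {
		log.Fatalln(err)
	}
}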
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index acd195fcd..21322ef6a 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
+ * Copyright 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +19,8 @@ package minio
import (
"context"
+ "io"
+ "io/ioutil"
"net/http"
"github.com/minio/minio-go/pkg/encrypt"
@@ -26,13 +28,31 @@ import (
// CopyObject - copy a source object into a new object
func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
+ return c.CopyObjectWithProgress(dst, src, nil)
+}
+
+// CopyObjectWithProgress - copy a source object into a new object, optionally takes
+// progress bar input to notify current progress.
+func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error {
header := make(http.Header)
for k, v := range src.Headers {
header[k] = v
}
+
+ var err error
+ var size int64
+ // If progress bar is specified, size should be requested as well initiate a StatObject request.
+ if progress != nil {
+ size, _, _, err = src.getProps(c)
+ if err != nil {
+ return err
+ }
+ }
+
if src.encryption != nil {
encrypt.SSECopy(src.encryption).Marshal(header)
}
+
if dst.encryption != nil {
dst.encryption.Marshal(header)
}
@@ -53,5 +73,11 @@ func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, dst.bucket, dst.object)
}
+
+ // Update the progress properly after successful copy.
+ if progress != nil {
+ io.CopyN(ioutil.Discard, progress, size)
+ }
+
return nil
}
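The hunks above make CopyObject a wrapper over CopyObjectWithProgress, which stats the source to learn its size only when a progress reader is supplied and drains that many bytes through the reader after a successful copy. A minimal sketch follows, reusing the byteCounter progress reader from the ComposeObject sketch; bucket and object names are illustrative.

package main

import minio "github.com/minio/minio-go"

// copyWithProgress sketches the new CopyObjectWithProgress call.
func copyWithProgress(client *minio.Client) error {
	src := minio.NewSourceInfo("srcbucket", "source-object", nil)
	dst, err := minio.NewDestinationInfo("dstbucket", "copied-object", nil, nil)
	if err != nil {
		return err
	}
	// With a non-nil progress reader the client first stats the source to
	// learn its size, then drains that many bytes through the reader once
	// the server-side copy has succeeded.
	return client.CopyObjectWithProgress(dst, src, &byteCounter{})
}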
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index 2402a7167..45ae11d6c 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -33,16 +33,17 @@ import (
// PutObjectOptions represents options specified by user for PutObject call
type PutObjectOptions struct {
- UserMetadata map[string]string
- Progress io.Reader
- ContentType string
- ContentEncoding string
- ContentDisposition string
- ContentLanguage string
- CacheControl string
- ServerSideEncryption encrypt.ServerSide
- NumThreads uint
- StorageClass string
+ UserMetadata map[string]string
+ Progress io.Reader
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ ContentLanguage string
+ CacheControl string
+ ServerSideEncryption encrypt.ServerSide
+ NumThreads uint
+ StorageClass string
+ WebsiteRedirectLocation string
}
// getNumThreads - gets the number of threads to be used in the multipart
@@ -84,6 +85,9 @@ func (opts PutObjectOptions) Header() (header http.Header) {
if opts.StorageClass != "" {
header[amzStorageClass] = []string{opts.StorageClass}
}
+ if opts.WebsiteRedirectLocation != "" {
+ header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation}
+ }
for k, v := range opts.UserMetadata {
if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) {
header["X-Amz-Meta-"+k] = []string{v}
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index 5356f8a4f..3b054c34a 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -66,6 +66,9 @@ var defaultFilterKeys = []string{
"x-amz-bucket-region",
"x-amz-request-id",
"x-amz-id-2",
+ "Content-Security-Policy",
+ "X-Xss-Protection",
+
// Add new headers to be ignored.
}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index daf3ec2c2..03778b04c 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -99,7 +99,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "6.0.0"
+ libraryVersion = "6.0.1"
)
// User Agent should always following the below style.
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index 7db5a99af..737742318 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -27,10 +27,6 @@ const absMinPartSize = 1024 * 1024 * 5
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
-// copyPartSize - default (and maximum) part size to copy in a
-// copy-object request (5GiB)
-const copyPartSize = 1024 * 1024 * 1024 * 5
-
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
@@ -61,3 +57,6 @@ const (
// Storage class header constant.
const amzStorageClass = "X-Amz-Storage-Class"
+
+// Website redirect location header constant
+const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go
index cf2ba0537..31dbcd12e 100644
--- a/vendor/github.com/minio/minio-go/core.go
+++ b/vendor/github.com/minio/minio-go/core.go
@@ -82,6 +82,8 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba
opts.ContentType = v
} else if strings.ToLower(k) == "cache-control" {
opts.CacheControl = v
+ } else if strings.ToLower(k) == strings.ToLower(amzWebsiteRedirectLocation) {
+ opts.WebsiteRedirectLocation = v
} else {
m[k] = metadata[k]
}
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
index c8236d69b..421b30e8e 100644
--- a/vendor/github.com/minio/minio-go/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/functional_tests.go
@@ -3474,15 +3474,13 @@ func testFunctional() {
function = "SetBucketPolicy(bucketName, readOnlyPolicy)"
functionAll += ", " + function
- readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}`
-
+ readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
args = map[string]interface{}{
"bucketName": bucketName,
"bucketPolicy": readOnlyPolicy,
}
err = c.SetBucketPolicy(bucketName, readOnlyPolicy)
-
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
@@ -3494,14 +3492,12 @@ func testFunctional() {
"bucketName": bucketName,
}
readOnlyPolicyRet, err := c.GetBucketPolicy(bucketName)
-
if err != nil {
logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
return
}
-
- if strings.Compare(readOnlyPolicyRet, readOnlyPolicy) != 0 {
- logError(testName, function, args, startTime, "", "policy should be set to readonly", err)
+ if readOnlyPolicyRet == "" {
+ logError(testName, function, args, startTime, "", "policy should be set", err)
return
}
@@ -3509,8 +3505,7 @@ func testFunctional() {
function = "SetBucketPolicy(bucketName, writeOnlyPolicy)"
functionAll += ", " + function
- writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}`
-
+ writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
args = map[string]interface{}{
"bucketName": bucketName,
"bucketPolicy": writeOnlyPolicy,
@@ -3534,8 +3529,8 @@ func testFunctional() {
return
}
- if strings.Compare(writeOnlyPolicyRet, writeOnlyPolicy) != 0 {
- logError(testName, function, args, startTime, "", "policy should be set to writeonly", err)
+ if writeOnlyPolicyRet == "" {
+ logError(testName, function, args, startTime, "", "policy should be set", err)
return
}
@@ -3543,7 +3538,7 @@ func testFunctional() {
function = "SetBucketPolicy(bucketName, readWritePolicy)"
functionAll += ", " + function
- readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}`
+ readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
args = map[string]interface{}{
"bucketName": bucketName,
@@ -3567,8 +3562,8 @@ func testFunctional() {
return
}
- if strings.Compare(readWritePolicyRet, readWritePolicy) != 0 {
- logError(testName, function, args, startTime, "", "policy should be set to readwrite", err)
+ if readWritePolicyRet == "" {
+ logError(testName, function, args, startTime, "", "policy should be set", err)
return
}
@@ -5995,7 +5990,11 @@ func testStorageClassMetadataPutObject() {
logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
return
}
-
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
successLogger(testName, function, args, startTime).Info()
}
@@ -6036,7 +6035,11 @@ func testStorageClassInvalidMetadataPutObject() {
logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
return
}
-
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
successLogger(testName, function, args, startTime).Info()
}
@@ -6136,7 +6139,11 @@ func testStorageClassMetadataCopyObject() {
logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
return
}
-
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
successLogger(testName, function, args, startTime).Info()
}
@@ -6497,7 +6504,7 @@ func testFunctionalV2() {
function = "SetBucketPolicy(bucketName, bucketPolicy)"
functionAll += ", " + function
- readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads,s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `/*"],"Sid": ""}]}`
+ readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
args = map[string]interface{}{
"bucketName": bucketName,
@@ -6558,9 +6565,9 @@ func testFunctionalV2() {
return
}
- objectName_noLength := objectName + "-nolength"
- args["objectName"] = objectName_noLength
- n, err = c.PutObject(bucketName, objectName_noLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ objectNameNoLength := objectName + "-nolength"
+ args["objectName"] = objectNameNoLength
+ n, err = c.PutObject(bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
index 49d6dcdf5..22c94347e 100644
--- a/vendor/github.com/minio/minio-go/retry.go
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -131,6 +131,7 @@ var retryableS3Codes = map[string]struct{}{
"InternalError": {},
"ExpiredToken": {},
"ExpiredTokenException": {},
+ "SlowDown": {},
// Add more AWS S3 codes here.
}
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 2f02ac89f..f80e25c41 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -222,6 +222,7 @@ var supportedHeaders = []string{
"content-encoding",
"content-disposition",
"content-language",
+ "x-amz-website-redirect-location",
// Add more supported headers here.
}