From baba8fa92f47cad604f6d2a2714d09f89178fbff Mon Sep 17 00:00:00 2001
From: Jesús Espino
Date: Mon, 30 Apr 2018 23:35:10 +0200
Subject: Upgrading minio-go library to 6.0.0 (#8651)

* Upgrading minio-go library to 6.0.0

* Removing unnecessary Gopkg constraint
---
 vendor/github.com/minio/minio-go/Makefile | 4 +-
 vendor/github.com/minio/minio-go/README.md | 5 -
 vendor/github.com/minio/minio-go/README_zh_CN.md | 1 -
 .../minio/minio-go/api-compose-object.go | 117 +-
 .../minio/minio-go/api-get-object-file.go | 13 +-
 vendor/github.com/minio/minio-go/api-get-object.go | 37 +-
 .../github.com/minio/minio-go/api-get-options.go | 8 +-
 vendor/github.com/minio/minio-go/api-get-policy.go | 53 +-
 .../github.com/minio/minio-go/api-notification.go | 12 +-
 vendor/github.com/minio/minio-go/api-presigned.go | 4 +-
 vendor/github.com/minio/minio-go/api-put-bucket.go | 58 +-
 .../minio/minio-go/api-put-object-context.go | 6 -
 .../minio/minio-go/api-put-object-copy.go | 36 +-
 .../minio/minio-go/api-put-object-encrypted.go | 44 -
 .../minio/minio-go/api-put-object-multipart.go | 17 +-
 .../minio/minio-go/api-put-object-streaming.go | 4 +-
 vendor/github.com/minio/minio-go/api-put-object.go | 37 +-
 vendor/github.com/minio/minio-go/api-remove.go | 21 +-
 vendor/github.com/minio/minio-go/api-stat.go | 2 +-
 vendor/github.com/minio/minio-go/api.go | 29 +-
 vendor/github.com/minio/minio-go/appveyor.yml | 1 +
 vendor/github.com/minio/minio-go/bucket-cache.go | 4 +-
 .../minio/minio-go/bucket-notification.go | 69 +-
 vendor/github.com/minio/minio-go/constants.go | 7 -
 vendor/github.com/minio/minio-go/core.go | 16 +-
 .../github.com/minio/minio-go/functional_tests.go | 1619 +++++++++++++-------
 .../github.com/minio/minio-go/pkg/encrypt/cbc.go | 294 ----
 .../minio/minio-go/pkg/encrypt/interface.go | 54 -
 .../github.com/minio/minio-go/pkg/encrypt/keys.go | 166 --
 .../minio/minio-go/pkg/encrypt/server-side.go | 195 +++
 .../minio-go/pkg/policy/bucket-policy-condition.go | 116 --
 .../minio/minio-go/pkg/policy/bucket-policy.go | 635 --------
 .../minio-go/pkg/s3signer/request-signature-v2.go | 47 +-
 vendor/github.com/minio/minio-go/retry.go | 3 +
 vendor/github.com/minio/minio-go/transport.go | 10 +-
 vendor/github.com/minio/minio-go/utils.go | 21 +-
 36 files changed, 1539 insertions(+), 2226 deletions(-)
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-encrypted.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
 create mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go

diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile
index 05081c723..bad81ffaf 100644
--- a/vendor/github.com/minio/minio-go/Makefile
+++ b/vendor/github.com/minio/minio-go/Makefile
@@ -3,10 +3,10 @@ all: checks
 checks:
	@go get -t ./...
	@go vet ./...
-	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
+	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
@go get github.com/dustin/go-humanize/... @go get github.com/sirupsen/logrus/... - @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go + @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done @go get -u github.com/a8m/mark/... @go get -u github.com/minio/cli/... diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md index 2dedc1a28..91b42049f 100644 --- a/vendor/github.com/minio/minio-go/README.md +++ b/vendor/github.com/minio/minio-go/README.md @@ -130,7 +130,6 @@ The full API Reference is available here. ### API Reference : Bucket policy Operations * [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) * [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) -* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies) ### API Reference : Bucket notification Operations * [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) @@ -156,10 +155,6 @@ The full API Reference is available here. * [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -### API Reference: Encrypted Object Operations -* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject) -* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject) - ### API Reference : Presigned Operations * [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) * [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) diff --git a/vendor/github.com/minio/minio-go/README_zh_CN.md b/vendor/github.com/minio/minio-go/README_zh_CN.md index 5584f4255..a5acf199e 100644 --- a/vendor/github.com/minio/minio-go/README_zh_CN.md +++ b/vendor/github.com/minio/minio-go/README_zh_CN.md @@ -141,7 +141,6 @@ mc ls play/mymusic/ ### API文档 : 存储桶策略 * [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) * [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) -* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies) ### API文档 : 存储桶通知 * [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go index 88b60d604..99b2adae8 100644 --- a/vendor/github.com/minio/minio-go/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/api-compose-object.go @@ -19,7 +19,6 @@ package minio import ( "context" - "encoding/base64" "fmt" "net/http" "net/url" @@ -27,58 +26,15 @@ import ( "strings" "time" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) -// SSEInfo - represents Server-Side-Encryption parameters specified by -// a user. 
-type SSEInfo struct { - key []byte - algo string -} - -// NewSSEInfo - specifies (binary or un-encoded) encryption key and -// algorithm name. If algo is empty, it defaults to "AES256". Ref: -// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -func NewSSEInfo(key []byte, algo string) SSEInfo { - if algo == "" { - algo = "AES256" - } - return SSEInfo{key, algo} -} - -// internal method that computes SSE-C headers -func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string { - if s == nil { - return nil - } - - cs := "" - if isCopySource { - cs = "copy-source-" - } - return map[string]string{ - "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo, - "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key), - "x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key), - } -} - -// GetSSEHeaders - computes and returns headers for SSE-C as key-value -// pairs. They can be set as metadata in PutObject* requests (for -// encryption) or be set as request headers in `Core.GetObject` (for -// decryption). -func (s *SSEInfo) GetSSEHeaders() map[string]string { - return s.getSSEHeaders(false) -} - // DestinationInfo - type with information about the object to be // created via server-side copy requests, using the Compose API. type DestinationInfo struct { bucket, object string - - // key for encrypting destination - encryption *SSEInfo + encryption encrypt.ServerSide // if no user-metadata is provided, it is copied from source // (when there is only once source object in the compose @@ -97,9 +53,7 @@ type DestinationInfo struct { // if needed. If nil is passed, and if only a single source (of any // size) is provided in the ComposeObject call, then metadata from the // source is copied to the destination. -func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, - userMeta map[string]string) (d DestinationInfo, err error) { - +func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucket); err != nil { return d, err @@ -125,7 +79,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, return DestinationInfo{ bucket: bucket, object: object, - encryption: encryptSSEC, + encryption: sse, userMetadata: m, }, nil } @@ -154,10 +108,8 @@ func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) ma // server-side copying APIs. type SourceInfo struct { bucket, object string - - start, end int64 - - decryptKey *SSEInfo + start, end int64 + encryption encrypt.ServerSide // Headers to send with the upload-part-copy request involving // this source object. Headers http.Header @@ -169,23 +121,17 @@ type SourceInfo struct { // `decryptSSEC` is the decryption key using server-side-encryption // with customer provided key. It may be nil if the source is not // encrypted. 
-func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo { +func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { r := SourceInfo{ bucket: bucket, object: object, start: -1, // range is unspecified by default - decryptKey: decryptSSEC, + encryption: sse, Headers: make(http.Header), } // Set the source header r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) - - // Assemble decryption headers for upload-part-copy request - for k, v := range decryptSSEC.getSSEHeaders(true) { - r.Headers.Set(k, v) - } - return r } @@ -245,10 +191,7 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s // Get object info - need size and etag here. Also, decryption // headers are added to the stat request if given. var objInfo ObjectInfo - opts := StatObjectOptions{} - for k, v := range s.decryptKey.getSSEHeaders(false) { - opts.Set(k, v) - } + opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}} objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) if err != nil { err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) @@ -478,37 +421,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { // involved, it is being copied wholly and at most 5GiB in // size, emptyfiles are also supported). if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { - h := srcs[0].Headers - // Add destination encryption headers - for k, v := range dst.encryption.getSSEHeaders(false) { - h.Set(k, v) - } - - // If no user metadata is specified (and so, the - // for-loop below is not entered), metadata from the - // source is copied to the destination (due to - // single-part copy-object PUT request behaviour). - for k, v := range dst.getUserMetaHeadersMap(true) { - h.Set(k, v) - } - - // Send copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: dst.bucket, - objectName: dst.object, - customHeader: h, - }) - defer closeResponse(resp) - if err != nil { - return err - } - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, dst.bucket, dst.object) - } - - // Return nil on success. - return nil + return c.CopyObject(dst, srcs[0]) } // Now, handle multipart-copy cases. 
@@ -527,7 +440,8 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { for k, v := range metaMap { metaHeaders[k] = v } - uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders}) + + uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) if err != nil { return err } @@ -537,9 +451,12 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { partIndex := 1 for i, src := range srcs { h := src.Headers + if src.encryption != nil { + src.encryption.Marshal(h) + } // Add destination encryption headers - for k, v := range dst.encryption.getSSEHeaders(false) { - h.Set(k, v) + if dst.encryption != nil { + dst.encryption.Marshal(h) } // calculate start/end indices of parts after diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index 2b58220a6..a852220a2 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -18,14 +18,11 @@ package minio import ( + "context" "io" "os" "path/filepath" - "github.com/minio/minio-go/pkg/encrypt" - - "context" - "github.com/minio/minio-go/pkg/s3utils" ) @@ -40,14 +37,6 @@ func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObje return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) } -// FGetEncryptedObject - Decrypt and store an object at filePath. -func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error { - if materials == nil { - return ErrInvalidArgument("Unable to recognize empty encryption properties") - } - return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials}) -} - // fGetObjectWithContext - fgetObject wrapper function with context func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { // Input validation. diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index 50bbc2201..0bf556ec6 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -27,20 +27,9 @@ import ( "sync" "time" - "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) -// GetEncryptedObject deciphers and streams data stored in the server after applying a specified encryption materials, -// returned stream should be closed by the caller. -func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) { - if encryptMaterials == nil { - return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") - } - - return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials}) -} - // GetObject - returns an seekable, readable object. func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) @@ -127,6 +116,9 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } else { // First request is a Stat or Seek call. // Only need to run a StatObject until an actual Read or ReadAt request comes through. 
+ + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) if err != nil { resCh <- getResponse{ @@ -142,6 +134,8 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } } } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") if etag != "" { opts.SetMatchETag(etag) } @@ -381,13 +375,11 @@ func (o *Object) Stat() (ObjectInfo, error) { // This is the first request. if !o.isStarted || !o.objectInfoSet { - statReq := getRequest{ + // Send the request and get the response. + _, err := o.doGetRequest(getRequest{ isFirstReq: !o.isStarted, settingObjectInfo: !o.objectInfoSet, - } - - // Send the request and get the response. - _, err := o.doGetRequest(statReq) + }) if err != nil { o.prevErr = err return ObjectInfo{}, err @@ -493,7 +485,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { // Negative offset is valid for whence of '2'. if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) } // This is the first request. So before anything else @@ -662,15 +654,6 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op Metadata: extractObjMetadata(resp.Header), } - reader := resp.Body - if opts.Materials != nil { - err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey)) - if err != nil { - return nil, ObjectInfo{}, err - } - reader = opts.Materials - } - // do not close body here, caller will close - return reader, objectStat, nil + return resp.Body, objectStat, nil } diff --git a/vendor/github.com/minio/minio-go/api-get-options.go b/vendor/github.com/minio/minio-go/api-get-options.go index dd70415cd..a5a87526f 100644 --- a/vendor/github.com/minio/minio-go/api-get-options.go +++ b/vendor/github.com/minio/minio-go/api-get-options.go @@ -28,9 +28,8 @@ import ( // GetObjectOptions are used to specify additional headers or options // during GET requests. type GetObjectOptions struct { - headers map[string]string - - Materials encrypt.Materials + headers map[string]string + ServerSideEncryption encrypt.ServerSide } // StatObjectOptions are used to specify additional headers or options @@ -45,6 +44,9 @@ func (o GetObjectOptions) Header() http.Header { for k, v := range o.headers { headers.Set(k, v) } + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() != encrypt.S3 { + o.ServerSideEncryption.Marshal(headers) + } return headers } diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index a4259c9d7..12d4c590e 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -19,62 +19,32 @@ package minio import ( "context" - "encoding/json" "io/ioutil" "net/http" "net/url" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/s3utils" ) // GetBucketPolicy - get bucket policy at a given path. 
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) { +func (c Client) GetBucketPolicy(bucketName string) (string, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return policy.BucketPolicyNone, err + return "", err } - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return policy.BucketPolicyNone, err - } - policyInfo, err := c.getBucketPolicy(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchBucketPolicy" { - return policy.BucketPolicyNone, nil - } - return policy.BucketPolicyNone, err - } - return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil -} - -// ListBucketPolicies - list all policies for a given prefix and all its children. -func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return map[string]policy.BucketPolicy{}, err - } - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return map[string]policy.BucketPolicy{}, err - } - policyInfo, err := c.getBucketPolicy(bucketName) + bucketPolicy, err := c.getBucketPolicy(bucketName) if err != nil { errResponse := ToErrorResponse(err) if errResponse.Code == "NoSuchBucketPolicy" { - return map[string]policy.BucketPolicy{}, nil + return "", nil } - return map[string]policy.BucketPolicy{}, err + return "", err } - return policy.GetPolicies(policyInfo.Statements, bucketName), nil -} - -// Default empty bucket access policy. -var emptyBucketAccessPolicy = policy.BucketAccessPolicy{ - Version: "2012-10-17", + return bucketPolicy, nil } // Request server for current bucket policy. -func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) { +func (c Client) getBucketPolicy(bucketName string) (string, error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -89,21 +59,20 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e defer closeResponse(resp) if err != nil { - return emptyBucketAccessPolicy, err + return "", err } if resp != nil { if resp.StatusCode != http.StatusOK { - return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "") + return "", httpRespToErrorResponse(resp, bucketName, "") } } bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) if err != nil { - return emptyBucketAccessPolicy, err + return "", err } - policy := policy.BucketAccessPolicy{} - err = json.Unmarshal(bucketPolicyBuf, &policy) + policy := string(bucketPolicyBuf) return policy, err } diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go index 578fdea8e..1c01e362b 100644 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -205,13 +205,11 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { continue } - // Send notifications on channel only if there are events received. 
- if len(notificationInfo.Records) > 0 { - select { - case notificationInfoCh <- notificationInfo: - case <-doneCh: - return - } + // Send notificationInfo + select { + case notificationInfoCh <- notificationInfo: + case <-doneCh: + return } } // Look for any underlying errors. diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go index 8b0258948..a2c060786 100644 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ b/vendor/github.com/minio/minio-go/api-presigned.go @@ -119,7 +119,9 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str return nil, nil, err } - u, err = c.makeTargetURL(bucketName, "", location, nil) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) + + u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index bb583a78f..8920ac742 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -20,13 +20,12 @@ package minio import ( "bytes" "context" - "encoding/json" "encoding/xml" - "fmt" + "io/ioutil" "net/http" "net/url" + "strings" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/s3utils" ) @@ -101,73 +100,40 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { // SetBucketPolicy set the access permissions on an existing bucket. // -// For example -// -// none - owner gets full access [default]. -// readonly - anonymous get access for everyone at a given object prefix. -// readwrite - anonymous list/put/delete access to a given object prefix. -// writeonly - anonymous put/delete access to a given object prefix. -func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error { +func (c Client) SetBucketPolicy(bucketName, policy string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return err - } - - if !bucketPolicy.IsValidBucketPolicy() { - return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy)) - } - - policyInfo, err := c.getBucketPolicy(bucketName) - errResponse := ToErrorResponse(err) - if err != nil && errResponse.Code != "NoSuchBucketPolicy" { - return err - } - - if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil { - // As the request is for removing policy and the bucket - // has empty policy statements, just return success. - return nil - } - - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix) // Save the updated policies. - return c.putBucketPolicy(bucketName, policyInfo) + return c.putBucketPolicy(bucketName, policy) } // Saves a new bucket policy. -func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error { +func (c Client) putBucketPolicy(bucketName, policy string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - // If there are no policy statements, we should remove entire policy. - if len(policyInfo.Statements) == 0 { - return c.removeBucketPolicy(bucketName) - } - // Get resources properly escaped and lined up before // using them in http request. 
urlValues := make(url.Values) urlValues.Set("policy", "") - policyBytes, err := json.Marshal(&policyInfo) + // Content-length is mandatory for put policy request + policyReader := strings.NewReader(policy) + b, err := ioutil.ReadAll(policyReader) if err != nil { return err } - policyBuffer := bytes.NewReader(policyBytes) reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: policyBuffer, - contentLength: int64(len(policyBytes)), - contentMD5Base64: sumMD5Base64(policyBytes), - contentSHA256Hex: sum256Hex(policyBytes), + bucketName: bucketName, + queryValues: urlValues, + contentBody: policyReader, + contentLength: int64(len(b)), } // Execute PUT to upload a new bucket policy. diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go index a6f23dcaa..ff4663e2f 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-context.go +++ b/vendor/github.com/minio/minio-go/api-put-object-context.go @@ -29,11 +29,5 @@ func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName if err != nil { return 0, err } - if opts.EncryptMaterials != nil { - if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil { - return 0, err - } - return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts) - } return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go index 8032009dc..acd195fcd 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-copy.go +++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -17,7 +17,41 @@ package minio +import ( + "context" + "net/http" + + "github.com/minio/minio-go/pkg/encrypt" +) + // CopyObject - copy a source object into a new object func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { - return c.ComposeObject(dst, []SourceInfo{src}) + header := make(http.Header) + for k, v := range src.Headers { + header[k] = v + } + if src.encryption != nil { + encrypt.SSECopy(src.encryption).Marshal(header) + } + if dst.encryption != nil { + dst.encryption.Marshal(header) + } + for k, v := range dst.getUserMetaHeadersMap(true) { + header.Set(k, v) + } + + resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{ + bucketName: dst.bucket, + objectName: dst.object, + customHeader: header, + }) + if err != nil { + return err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, dst.bucket, dst.object) + } + return nil } diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go deleted file mode 100644 index 87dd1ab1a..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - - "github.com/minio/minio-go/pkg/encrypt" -) - -// PutEncryptedObject - Encrypt and store object. -func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) { - - if encryptMaterials == nil { - return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") - } - - if err := encryptMaterials.SetupEncryptMode(reader); err != nil { - return 0, err - } - - return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials}) -} - -// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath. -func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) { - return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials}) -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index 5262e8b91..52dc069d0 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -33,6 +33,7 @@ import ( "strconv" "strings" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) @@ -138,7 +139,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje // Proceed to upload the part. var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, sha256Hex, int64(length), opts.UserMetadata) + md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) if err != nil { return totalUploadedSize, err } @@ -226,11 +227,9 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN return initiateMultipartUploadResult, nil } -const serverEncryptionKeyPrefix = "x-amz-server-side-encryption" - // uploadPart - Uploads a part in a multipart upload. func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) { + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err @@ -260,13 +259,9 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID // Set encryption headers, if any. 
customHeader := make(http.Header) - // for k, v := range metadata { - // if len(v) > 0 { - // if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) { - // customHeader.Set(k, v) - // } - // } - // } + if sse != nil && sse.Type() != encrypt.S3 && sse.Type() != encrypt.KMS { + sse.Marshal(customHeader) + } reqMetadata := requestMetadata{ bucketName: bucketName, diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go index be1dc57ef..211d1c23c 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go @@ -167,7 +167,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, - "", "", partSize, opts.UserMetadata) + "", "", partSize, opts.ServerSideEncryption) if err != nil { uploadedPartsCh <- uploadedPartRes{ Size: 0, @@ -280,7 +280,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, io.LimitReader(hookReader, partSize), - partNumber, "", "", partSize, opts.UserMetadata) + partNumber, "", "", partSize, opts.ServerSideEncryption) if err != nil { return totalUploadedSize, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index ca4052225..2402a7167 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -33,15 +33,16 @@ import ( // PutObjectOptions represents options specified by user for PutObject call type PutObjectOptions struct { - UserMetadata map[string]string - Progress io.Reader - ContentType string - ContentEncoding string - ContentDisposition string - CacheControl string - EncryptMaterials encrypt.Materials - NumThreads uint - StorageClass string + UserMetadata map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string } // getNumThreads - gets the number of threads to be used in the multipart @@ -71,19 +72,20 @@ func (opts PutObjectOptions) Header() (header http.Header) { if opts.ContentDisposition != "" { header["Content-Disposition"] = []string{opts.ContentDisposition} } + if opts.ContentLanguage != "" { + header["Content-Language"] = []string{opts.ContentLanguage} + } if opts.CacheControl != "" { header["Cache-Control"] = []string{opts.CacheControl} } - if opts.EncryptMaterials != nil { - header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()} - header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()} - header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()} + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) } if opts.StorageClass != "" { header[amzStorageClass] = []string{opts.StorageClass} } for k, v := range opts.UserMetadata { - if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) && !isStorageClassHeader(k) { + if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { header["X-Amz-Meta-"+k] = []string{v} } else { header[k] = []string{v} @@ -92,11 +94,10 @@ func (opts PutObjectOptions) Header() (header http.Header) { return } -// 
validate() checks if the UserMetadata map has standard headers or client side
-// encryption headers and raises an error if so.
+// validate() checks if the UserMetadata map has standard headers and raises an error if so.
 func (opts PutObjectOptions) validate() (err error) {
 	for k, v := range opts.UserMetadata {
-		if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isCSEHeader(k) || isStorageClassHeader(k) {
+		if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
 			return ErrInvalidArgument(k + " unsupported user defined metadata name")
 		}
 		if !httplex.ValidHeaderFieldValue(v) {
 			return ErrInvalidArgument(v + " unsupported user defined metadata value")
 		}
@@ -217,7 +218,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName
 		// Proceed to upload the part.
 		var objPart ObjectPart
 		objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
-			"", "", int64(length), opts.UserMetadata)
+			"", "", int64(length), opts.ServerSideEncryption)
 		if err != nil {
 			return totalUploadedSize, err
 		}
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index f14b2eb7f..c2ffcdd34 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -129,10 +129,8 @@ func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh
 	}
 }
 
-// RemoveObjects remove multiples objects from a bucket.
-// The list of objects to remove are received from objectsCh.
-// Remove failures are sent back via error channel.
-func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation.
+func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
 	errorCh := make(chan RemoveObjectError, 1)
 
 	// Validate if bucket name is valid.
@@ -189,7 +187,7 @@
 			// Generate remove multi objects XML request
 			removeBytes := generateRemoveMultiObjectsRequest(batch)
 			// Execute GET on bucket to list objects.
-			resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
+			resp, err := c.executeMethod(ctx, "POST", requestMetadata{
 				bucketName:       bucketName,
 				queryValues:      urlValues,
 				contentBody:      bytes.NewReader(removeBytes),
@@ -197,6 +195,12 @@
 				contentMD5Base64: sumMD5Base64(removeBytes),
 				contentSHA256Hex: sum256Hex(removeBytes),
 			})
+			if resp != nil {
+				if resp.StatusCode != http.StatusOK {
+					e := httpRespToErrorResponse(resp, bucketName, "")
+					errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+				}
+			}
 			if err != nil {
 				for _, b := range batch {
 					errorCh <- RemoveObjectError{ObjectName: b, Err: err}
@@ -213,6 +217,13 @@
 	return errorCh
 }
 
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove are received from objectsCh.
+// Remove failures are sent back via error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+	return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh)
+}
+
 // RemoveIncompleteUpload aborts a partially uploaded object.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // Input validation. diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 8904dd678..5356f8a4f 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -115,7 +115,7 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o return ObjectInfo{}, err } if resp != nil { - if resp.StatusCode != http.StatusOK { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index fa8595bcd..daf3ec2c2 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * Copyright 2015-2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -99,7 +99,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "4.0.7" + libraryVersion = "6.0.0" ) // User Agent should always following the below style. @@ -258,8 +258,7 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { } switch { case signerType.IsV2(): - // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + return errors.New("signature V2 cannot support redirection") case signerType.IsV4(): req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) } @@ -288,7 +287,7 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re // Instantiate http client and bucket location cache. clnt.httpClient = &http.Client{ - Transport: defaultMinioTransport, + Transport: DefaultTransport, CheckRedirect: clnt.redirectHeaders, } @@ -338,7 +337,7 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { // TLSClientConfig: &tls.Config{RootCAs: pool}, // DisableCompression: true, // } - // api.SetTransport(tr) + // api.SetCustomTransport(tr) // if c.httpClient != nil { c.httpClient.Transport = customHTTPTransport @@ -694,8 +693,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } } + // Look if target url supports virtual host. + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) + // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues) + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues) if err != nil { return nil, err } @@ -737,7 +739,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } if signerType.IsV2() { // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires) + req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) } else if signerType.IsV4() { // Presign URL with signature v4. 
req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) @@ -783,7 +785,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R switch { case signerType.IsV2(): // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: // Streaming signature is used by default for a PUT object request. Additionally we also // look if the initialized client is secure, if yes then we don't need to perform @@ -815,7 +817,7 @@ func (c Client) setUserAgent(req *http.Request) { } // makeTargetURL make a new target url. -func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) { +func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { host := c.endpointURL.Host // For Amazon S3 endpoint, try to fetch location based endpoint. if s3utils.IsAmazonEndpoint(*c.endpointURL) { @@ -854,8 +856,6 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // Make URL only if bucketName is available, otherwise use the // endpoint URL. if bucketName != "" { - // Save if target url will have buckets which suppport virtual host. - isVirtualHostStyle := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) // If endpoint supports virtual host style use that always. // Currently only S3 and Google Cloud Storage would support // virtual host style. @@ -883,12 +883,17 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // returns true if virtual hosted style requests are to be used. func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if bucketName == "" { + return false + } + if c.lookup == BucketLookupDNS { return true } if c.lookup == BucketLookupPath { return false } + // default to virtual only for Amazon/Google storage. In all other cases use // path style requests return s3utils.IsVirtualHostSupported(url, bucketName) diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml index b93b4d45d..aa9f840e5 100644 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -19,6 +19,7 @@ install: - go get -u github.com/golang/lint/golint - go get -u github.com/remyoudompheng/go-misc/deadcode - go get -u github.com/gordonklaus/ineffassign + - go get -u golang.org/x/crypto/argon2 - go get -t ./... 
# to run your custom scripts instead of automatic MSBuild diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go index 5d56cdf42..cac7ad792 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -203,7 +203,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro } if signerType.IsV2() { - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + // Get Bucket Location calls should be always path style + isVirtualHost := false + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) return req, nil } diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go index 1b9d6a0c7..ea303dd9d 100644 --- a/vendor/github.com/minio/minio-go/bucket-notification.go +++ b/vendor/github.com/minio/minio-go/bucket-notification.go @@ -19,7 +19,8 @@ package minio import ( "encoding/xml" - "reflect" + + "github.com/minio/minio-go/pkg/set" ) // NotificationEventType is a S3 notification event associated to the bucket notification configuration @@ -96,7 +97,7 @@ type NotificationConfig struct { // NewNotificationConfig creates one notification config and sets the given ARN func NewNotificationConfig(arn Arn) NotificationConfig { - return NotificationConfig{Arn: arn} + return NotificationConfig{Arn: arn, Filter: &Filter{}} } // AddEvents adds one event to the current notification config @@ -163,39 +164,79 @@ type BucketNotification struct { } // AddTopic adds a given topic config to the general bucket notification config -func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) { +func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} for _, n := range b.TopicConfigs { - if reflect.DeepEqual(n, newTopicConfig) { - // Avoid adding duplicated entry - return + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } } } b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true } // AddQueue adds a given queue config to the general bucket notification config -func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) { +func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} for _, n := range b.QueueConfigs { - if reflect.DeepEqual(n, newQueueConfig) { - // Avoid adding duplicated entry - return + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } } } b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) + return true } // AddLambda adds a given lambda config to the general bucket notification config -func (b *BucketNotification) 
AddLambda(lambdaConfig NotificationConfig) { +func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} for _, n := range b.LambdaConfigs { - if reflect.DeepEqual(n, newLambdaConfig) { - // Avoid adding duplicated entry - return + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } } } b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true } // RemoveTopicByArn removes all topic configurations that match the exact specified ARN diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index 84b6cfdf3..7db5a99af 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -59,12 +59,5 @@ const ( iso8601DateFormat = "20060102T150405Z" ) -// Encryption headers stored along with the object. -const ( - amzHeaderIV = "X-Amz-Meta-X-Amz-Iv" - amzHeaderKey = "X-Amz-Meta-X-Amz-Key" - amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc" -) - // Storage class header constant. const amzStorageClass = "X-Amz-Storage-Class" diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go index 4245fc065..cf2ba0537 100644 --- a/vendor/github.com/minio/minio-go/core.go +++ b/vendor/github.com/minio/minio-go/core.go @@ -21,8 +21,6 @@ import ( "context" "io" "strings" - - "github.com/minio/minio-go/pkg/policy" ) // Core - Inherits Client and adds new methods to expose the low level S3 APIs. @@ -78,6 +76,8 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba opts.ContentEncoding = v } else if strings.ToLower(k) == "content-disposition" { opts.ContentDisposition = v + } else if strings.ToLower(k) == "content-language" { + opts.ContentLanguage = v } else if strings.ToLower(k) == "content-type" { opts.ContentType = v } else if strings.ToLower(k) == "cache-control" { @@ -103,13 +103,7 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de // PutObjectPart - Upload an object part. func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) { - return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil) -} - -// PutObjectPartWithMetadata - upload an object part with additional request metadata. -func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader, - size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) { - return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata) + return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil) } // ListObjectParts - List uploaded parts of an incomplete upload.x @@ -131,12 +125,12 @@ func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { } // GetBucketPolicy - fetches bucket access policy for a given bucket. 
-func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) { +func (c Core) GetBucketPolicy(bucket string) (string, error) { return c.getBucketPolicy(bucket) } // PutBucketPolicy - applies a new bucket access policy for a given bucket. -func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error { +func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error { return c.putBucketPolicy(bucket, bucketPolicy) } diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go index c4156c293..c8236d69b 100644 --- a/vendor/github.com/minio/minio-go/functional_tests.go +++ b/vendor/github.com/minio/minio-go/functional_tests.go @@ -22,7 +22,6 @@ package main import ( "bytes" "context" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -45,7 +44,6 @@ import ( log "github.com/sirupsen/logrus" "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/policy" ) const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" @@ -707,13 +705,12 @@ func testPutObjectWithMetadata() { successLogger(testName, function, args, startTime).Info() } -// Test put object with streaming signature. -func testPutObjectStreaming() { +func testPutObjectWithContentLanguage() { // initialize logging params objectName := "test-object" startTime := time.Now() testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" + function := "PutObject(bucketName, objectName, reader, size, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": objectName, @@ -752,21 +749,29 @@ func testPutObjectStreaming() { return } - // Upload an object. - sizes := []int64{0, 64*1024 - 1, 64 * 1024} + data := bytes.Repeat([]byte("a"), int(0)) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en-US", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } - for _, size := range sizes { - data := bytes.Repeat([]byte("a"), int(size)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } + if n != 0 { + logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err) + return + } - if n != size { - logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) - return - } + objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en-US" { + logError(testName, function, args, startTime, "", "Expected content-language 'en-US' doesn't match with StatObject return value", err) + return } // Delete all objects and buckets @@ -778,23 +783,25 @@ func testPutObjectStreaming() { successLogger(testName, function, args, startTime).Info() } -// Test listing partially uploaded objects. -func testListPartiallyUploaded() { +// Test put object with streaming signature. 
+func testPutObjectStreaming() { // initialize logging params + objectName := "test-object" startTime := time.Now() testName := getFuncName() - function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + function := "PutObject(bucketName, objectName, reader,size,opts)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "isRecursive": "", + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", } // Seed random based on current time. rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( + c, err := minio.NewV4( os.Getenv(serverEndpoint), os.Getenv(accessKey), os.Getenv(secretKey), @@ -805,16 +812,15 @@ func testListPartiallyUploaded() { return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + // Set user agent. c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName - // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { @@ -822,46 +828,19 @@ func testListPartiallyUploaded() { return } - bufSize := dataFileMap["datafile-65-MB"] - r := bytes.NewReader(bytes.Repeat([]byte("0"), bufSize*2)) + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} - reader, writer := io.Pipe() - go func() { - i := 0 - for i < 25 { - _, cerr := io.CopyN(writer, r, (int64(bufSize)*2)/25) - if cerr != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - i++ - r.Seek(0, 0) + for _, size := range sizes { + data := bytes.Repeat([]byte("a"), int(size)) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return } - writer.CloseWithError(errors.New("proactively closed to be verified later")) - }() - - objectName := bucketName + "-resumable" - args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize*2), minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject should fail", err) - return - } - if !strings.Contains(err.Error(), "proactively closed to be verified later") { - logError(testName, function, args, startTime, "", "String not found in PutObject output", err) - return - } - - doneCh := make(chan struct{}) - defer close(doneCh) - isRecursive := true - args["isRecursive"] = isRecursive - - multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) - for multiPartObject := range multiPartObjectCh { - if multiPartObject.Err != nil { - logError(testName, function, args, startTime, "", "Multipart object error", multiPartObject.Err) + if n != size { + logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) return } } @@ -1101,27 +1080,26 @@ func testGetObjectClosedTwice() { successLogger(testName, function, args, startTime).Info() } -// Test removing multiple objects with Remove API -func testRemoveMultipleObjects() { - // initialize logging params +// Test RemoveObjectsWithContext request context cancels after timeout +func testRemoveObjectsWithContext() { + // Initialize logging params. 
 	startTime := time.Now()
 	testName := getFuncName()
-	function := "RemoveObjects(bucketName, objectsCh)"
+	function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)"
 	args := map[string]interface{}{
 		"bucketName": "",
 	}
 
 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
 
-	// Instantiate new minio client object.
+	// Instantiate new minio client.
 	c, err := minio.New(
 		os.Getenv(serverEndpoint),
 		os.Getenv(accessKey),
 		os.Getenv(secretKey),
 		mustParseBool(os.Getenv(enableHTTPS)),
 	)
-
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
 		return
@@ -1129,7 +1107,6 @@ func testRemoveMultipleObjects() {
 
 	// Set user agent.
 	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
 	// Enable tracing, write to stdout.
 	// c.TraceOn(os.Stderr)
 
@@ -1141,19 +1118,16 @@ func testRemoveMultipleObjects() {
 	err = c.MakeBucket(bucketName, "us-east-1")
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
 		return
 	}
 
+	// Generate put data.
 	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
 
-	// Multi remove of 1100 objects
-	nrObjects := 200
-
+	// Multi remove of 20 objects.
+	nrObjects := 20
 	objectsCh := make(chan string)
-
 	go func() {
 		defer close(objectsCh)
-		// Upload objects and send them to objectsCh
 		for i := 0; i < nrObjects; i++ {
 			objectName := "sample" + strconv.Itoa(i) + ".txt"
 			_, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
 			if err != nil {
 				logError(testName, function, args, startTime, "", "PutObject failed", err)
 				continue
 			}
 			objectsCh <- objectName
 		}
 	}()
+	// Set context to cancel in 1 nanosecond.
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
 
-	// Call RemoveObjects API
-	errorCh := c.RemoveObjects(bucketName, objectsCh)
-
-	// Check if errorCh doesn't receive any error
+	// Call RemoveObjectsWithContext API with short timeout.
+	errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
+	// Check for error.
+	select {
+	case r := <-errorCh:
+		if r.Err == nil {
+			logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err)
+			return
+		}
+	}
+	// Set context with longer timeout.
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+	defer cancel()
+	// Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed.
+	errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
 	select {
 	case r, more := <-errorCh:
-		if more {
+		if more || r.Err != nil {
 			logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
 			return
 		}
 	}
 
-	// Delete all objects and buckets
+	// Delete all objects and buckets.
 	if err = cleanupBucket(bucketName, c); err != nil {
 		logError(testName, function, args, startTime, "", "Cleanup failed", err)
 		return
 	}
-
 	successLogger(testName, function, args, startTime).Info()
 }
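
The test above exercises the new context-aware bulk delete added in 6.0.0. For reference, a minimal sketch of the RemoveObjectsWithContext call pattern outside the test harness; the endpoint, credentials, bucket, and object names below are placeholders, not values from this patch:

	package main

	import (
		"context"
		"log"
		"time"

		"github.com/minio/minio-go"
	)

	func main() {
		// Placeholder endpoint and credentials.
		c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
		if err != nil {
			log.Fatalln(err)
		}

		// Object names to delete are streamed over a channel.
		objectsCh := make(chan string, 2)
		objectsCh <- "sample0.txt"
		objectsCh <- "sample1.txt"
		close(objectsCh)

		// The context bounds the whole multi-object delete; cancellation or
		// expiry surfaces as RemoveObjectError values on the returned channel.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		for e := range c.RemoveObjectsWithContext(ctx, "mybucket", objectsCh) {
			log.Printf("failed to remove %s: %v", e.ObjectName, e.Err)
		}
	}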
 
-// Tests removing partially uploaded objects.
-func testRemovePartiallyUploaded() {
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
 	// initialize logging params
 	startTime := time.Now()
 	testName := getFuncName()
-	function := "RemoveIncompleteUpload(bucketName, objectName)"
-	args := map[string]interface{}{}
+	function := "RemoveObjects(bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
 
 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
 
@@ -1204,6 +1195,7 @@ func testRemovePartiallyUploaded() {
 		os.Getenv(secretKey),
 		mustParseBool(os.Getenv(enableHTTPS)),
 	)
+
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
 		return
@@ -1226,40 +1218,39 @@ func testRemovePartiallyUploaded() {
 		return
 	}
 
-	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 200 objects
+	nrObjects := 200
+
+	objectsCh := make(chan string)
 
-	reader, writer := io.Pipe()
 	go func() {
-		i := 0
-		for i < 25 {
-			_, cerr := io.CopyN(writer, r, 128*1024)
-			if cerr != nil {
-				logError(testName, function, args, startTime, "", "Copy failed", err)
-				return
+		defer close(objectsCh)
+		// Upload objects and send them to objectsCh
+		for i := 0; i < nrObjects; i++ {
+			objectName := "sample" + strconv.Itoa(i) + ".txt"
+			_, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+			if err != nil {
+				logError(testName, function, args, startTime, "", "PutObject failed", err)
+				continue
			}
-			i++
-			r.Seek(0, 0)
+			objectsCh <- objectName
 		}
-		writer.CloseWithError(errors.New("proactively closed to be verified later"))
 	}()
 
-	objectName := bucketName + "-resumable"
-	args["objectName"] = objectName
+	// Call RemoveObjects API
+	errorCh := c.RemoveObjects(bucketName, objectsCh)
 
-	_, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PutObject should fail", err)
-		return
-	}
-	if !strings.Contains(err.Error(), "proactively closed to be verified later") {
-		logError(testName, function, args, startTime, "", "String not found", err)
-		return
-	}
-	err = c.RemoveIncompleteUpload(bucketName, objectName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
-		return
+	// Check if errorCh doesn't receive any error
+	select {
+	case r, more := <-errorCh:
+		if more {
+			logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+			return
+		}
 	}
 
 	// Delete all objects and buckets
 	if err = cleanupBucket(bucketName, c); err != nil {
 		logError(testName, function, args, startTime, "", "Cleanup failed", err)
@@ -1912,6 +1903,14 @@ func testGetObjectReadSeekFunctional() {
 		return
 	}
 
+	defer func() {
+		// Delete all objects and buckets
+		if err = cleanupBucket(bucketName, c); err != nil {
+			logError(testName, function, args, startTime, "", "Cleanup failed", err)
+			return
+		}
+	}()
+
 	// Generate 33K of data.
 	bufSize := dataFileMap["datafile-33-kB"]
 	var reader = getDataReader("datafile-33-kB")
@@ -1938,14 +1937,6 @@ func testGetObjectReadSeekFunctional() {
 		return
 	}
 
-	defer func() {
-		// Delete all objects and buckets
-		if err = cleanupBucket(bucketName, c); err != nil {
-			logError(testName, function, args, startTime, "", "Cleanup failed", err)
-			return
-		}
-	}()
-
 	// Read the data back
 	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
 	if err != nil {
@@ -2127,7 +2118,7 @@ func testGetObjectReadAtFunctional() {
 	buf3 := make([]byte, 512)
 	buf4 := make([]byte, 512)
 
-	// Test readAt before stat is called.
+	// Test readAt before stat is called such that objectInfo doesn't change.
m, err := r.ReadAt(buf1, offset) if err != nil { logError(testName, function, args, startTime, "", "ReadAt failed", err) @@ -2167,6 +2158,7 @@ func testGetObjectReadAtFunctional() { logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) return } + offset += 512 m, err = r.ReadAt(buf3, offset) if err != nil { @@ -2411,9 +2403,10 @@ func testPresignedPostPolicy() { } expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName if val, ok := res.Header["Location"]; ok { - if val[0] != expectedLocation { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) return } @@ -2588,6 +2581,10 @@ func testCopyObject() { return } + // Close all the get readers before proceeding with CopyObject operations. + r.Close() + readerCopy.Close() + // CopyObject again but with wrong conditions src = minio.NewSourceInfo(bucketName, objectName, nil) err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) @@ -2608,6 +2605,37 @@ func testCopyObject() { return } + // Perform the Copy which should update only metadata. + src = minio.NewSourceInfo(bucketName, objectName, nil) + dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{ + "Copy": "should be same", + }) + args["dst"] = dst + args["src"] = src + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(objInfo.ETag) + objInfo, err = c.StatObject(bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + // Delete all objects and buckets if err = cleanupBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) @@ -2620,23 +2648,19 @@ func testCopyObject() { successLogger(testName, function, args, startTime).Info() } -// TestEncryptionPutGet tests client side encryption -func testEncryptionPutGet() { +// Tests SSE-C get object ReaderSeeker interface methods. +func testEncryptedGetObjectReadSeekFunctional() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "cbcMaterials": "", - "metadata": "", - } + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + // Seed random based on current time. rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.NewV4( + // Instantiate new minio client object. 
+ c, err := minio.New( os.Getenv(serverEndpoint), os.Getenv(accessKey), os.Getenv(secretKey), @@ -2664,104 +2688,423 @@ func testEncryptionPutGet() { return } - // Generate a symmetric key - symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + }() + + // Generate 65MiB of data. + bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") + defer reader.Close() - // Generate an assymmetric key from predefine public and private certificates - privateKey, err := hex.DecodeString( - "30820277020100300d06092a864886f70d0101010500048202613082025d" + - "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + - "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + - "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + - "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + - "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + - "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + - "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + - "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + - "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + - "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + - "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + - "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + - "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + - "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + - "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + - "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + - "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + - "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + - "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + - "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + - "9945cb5c7d") + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + buf, err := ioutil.ReadAll(reader) if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) + logError(testName, function, args, startTime, "", "ReadAll failed", err) return } - publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + - "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + - "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + - "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + - "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + - "80a89e43f29b570203010001") + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) + logError(testName, function, args, startTime, "", "PutObject failed", err) return } - // Generate an asymmetric key - asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) - if err != nil { - logError(testName, function, args, 
startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err) + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) return } - testCases := []struct { - buf []byte - encKey encrypt.Key - }{ - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, - - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return } + defer r.Close() - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } - // Secured object - cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) - args["cbcMaterials"] = cbcMaterials + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } - if err != nil { - logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err) + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { return } - - // Put encrypted data - _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } } - + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than 
possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderAt interface methods. +func testEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 65MiB of data. 
+ bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + 
return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// TestEncryptionPutGet tests client side encryption +func testEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + // Read the data back - r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -2801,13 +3144,13 @@ func testEncryptionFPut() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)" + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "cbcMaterials": "", + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", } // Seed random based on current time. 
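
Both rewritten encryption tests drive SSE-C through the new pkg/encrypt server-side API in place of the deleted CBC key material. A minimal sketch of the round trip, assuming an HTTPS endpoint (servers reject SSE-C keys over plain HTTP); endpoint, credentials, names, and the password are placeholders:

	package main

	import (
		"bytes"
		"io/ioutil"
		"log"

		"github.com/minio/minio-go"
		"github.com/minio/minio-go/pkg/encrypt"
	)

	func main() {
		// Placeholder endpoint and credentials; secure=true matters for SSE-C.
		c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
		if err != nil {
			log.Fatalln(err)
		}

		// Derive the object key from a password and a unique salt; these
		// tests use bucket+object as the salt.
		sse := encrypt.DefaultPBKDF([]byte("my password"), []byte("mybucket"+"secret.txt"))

		data := []byte("sensitive payload")
		_, err = c.PutObject("mybucket", "secret.txt", bytes.NewReader(data),
			int64(len(data)), minio.PutObjectOptions{ServerSideEncryption: sse})
		if err != nil {
			log.Fatalln(err)
		}

		// The same key must accompany every read of the object.
		obj, err := c.GetObject("mybucket", "secret.txt", minio.GetObjectOptions{ServerSideEncryption: sse})
		if err != nil {
			log.Fatalln(err)
		}
		defer obj.Close()

		plain, err := ioutil.ReadAll(obj)
		if err != nil {
			log.Fatalln(err)
		}
		log.Printf("read back %d bytes", len(plain))
	}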
rand.Seed(time.Now().Unix()) @@ -2841,98 +3184,36 @@ func testEncryptionFPut() { return } - // Generate a symmetric key - symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - - // Generate an assymmetric key from predefine public and private certificates - privateKey, err := hex.DecodeString( - "30820277020100300d06092a864886f70d0101010500048202613082025d" + - "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + - "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + - "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + - "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + - "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + - "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + - "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + - "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + - "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + - "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + - "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + - "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + - "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + - "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + - "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + - "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + - "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + - "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + - "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + - "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + - "9945cb5c7d") - - if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) - return - } - - publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + - "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + - "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + - "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + - "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + - "80a89e43f29b570203010001") - if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) - return - } - - // Generate an asymmetric key - asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) - if err != nil { - logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err) - return - } - // Object custom metadata customContentType := "custom/contenttype" args["metadata"] = customContentType testCases := []struct { - buf []byte - encKey encrypt.Key + buf []byte }{ - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, - - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: asymKey, buf: 
bytes.Repeat([]byte("F"), 1)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ for i, testCase := range testCases { // Generate a random object name objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName // Secured object - cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) - args["cbcMaterials"] = cbcMaterials + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse - if err != nil { - logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err) - return - } // Generate a random file name. fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") file, err := os.Create(fileName) @@ -2947,13 +3228,13 @@ func testEncryptionFPut() { } file.Close() // Put encrypted data - if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil { + if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) return } // Read the data back - r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -3101,7 +3382,7 @@ func testFunctional() { startTime := time.Now() testName := getFuncName() function := "testFunctional()" - function_all := "" + functionAll := "" args := map[string]interface{}{} // Seed random based on current time. @@ -3129,7 +3410,7 @@ func testFunctional() { // Make a new bucket. function = "MakeBucket(bucketName, region)" - function_all = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" args["bucketName"] = bucketName err = c.MakeBucket(bucketName, "us-east-1") @@ -3158,7 +3439,7 @@ func testFunctional() { // Verify if bucket exits and you have access. var exists bool function = "BucketExists(bucketName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } @@ -3174,120 +3455,126 @@ func testFunctional() { } // Asserting the default bucket policy. 
- function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function args = map[string]interface{}{ - "bucketName": bucketName, - "objectPrefix": "", + "bucketName": bucketName, } - policyAccess, err := c.GetBucketPolicy(bucketName, "") - + nilPolicy, err := c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if policyAccess != "none" { - logError(testName, function, args, startTime, "", "policy should be set to none", err) + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) return } // Set the bucket policy to 'public readonly'. - function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}` + args = map[string]interface{}{ "bucketName": bucketName, - "objectPrefix": "", - "bucketPolicy": policy.BucketPolicyReadOnly, + "bucketPolicy": readOnlyPolicy, } - err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly) + + err = c.SetBucketPolicy(bucketName, readOnlyPolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return } // should return policy `readonly`. - function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function args = map[string]interface{}{ - "bucketName": bucketName, - "objectPrefix": "", + "bucketName": bucketName, } - policyAccess, err = c.GetBucketPolicy(bucketName, "") + readOnlyPolicyRet, err := c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if policyAccess != "readonly" { + + if strings.Compare(readOnlyPolicyRet, readOnlyPolicy) != 0 { logError(testName, function, args, startTime, "", "policy should be set to readonly", err) return } // Make the bucket 'public writeonly'. - function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}` + args = map[string]interface{}{ "bucketName": bucketName, - "objectPrefix": "", - "bucketPolicy": policy.BucketPolicyWriteOnly, + "bucketPolicy": writeOnlyPolicy, } - err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly) + err = c.SetBucketPolicy(bucketName, writeOnlyPolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return } // should return policy `writeonly`. 
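
As these hunks show, the pkg/policy helpers are gone in 6.0.0: SetBucketPolicy now takes the policy document itself as a JSON string, and GetBucketPolicy returns that JSON verbatim, with an empty string meaning no policy is set. A minimal sketch of the new shape, assuming an already-constructed *minio.Client named c and a placeholder bucket (imports elided):

	// applyReadOnlyPolicy grants anonymous listing on a bucket and echoes the
	// stored policy back; the statement mirrors the one the test builds inline.
	func applyReadOnlyPolicy(c *minio.Client, bucket string) error {
		readOnly := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket"],` +
			`"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` +
			bucket + `"],"Sid":""}]}`

		if err := c.SetBucketPolicy(bucket, readOnly); err != nil {
			return err
		}

		current, err := c.GetBucketPolicy(bucket) // raw JSON; "" when unset
		if err != nil {
			return err
		}
		log.Println("bucket policy is now:", current)
		return nil
	}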
- function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function args = map[string]interface{}{ - "bucketName": bucketName, - "objectPrefix": "", + "bucketName": bucketName, } - policyAccess, err = c.GetBucketPolicy(bucketName, "") + writeOnlyPolicyRet, err := c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if policyAccess != "writeonly" { + + if strings.Compare(writeOnlyPolicyRet, writeOnlyPolicy) != 0 { logError(testName, function, args, startTime, "", "policy should be set to writeonly", err) return } + // Make the bucket 'public read/write'. - function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}` + args = map[string]interface{}{ "bucketName": bucketName, - "objectPrefix": "", - "bucketPolicy": policy.BucketPolicyReadWrite, + "bucketPolicy": readWritePolicy, } - err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite) + err = c.SetBucketPolicy(bucketName, readWritePolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return } // should return policy `readwrite`. - function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function args = map[string]interface{}{ - "bucketName": bucketName, - "objectPrefix": "", + "bucketName": bucketName, } - policyAccess, err = c.GetBucketPolicy(bucketName, "") - + readWritePolicyRet, err := c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if policyAccess != "readwrite" { + + if strings.Compare(readWritePolicyRet, readWritePolicy) != 0 { logError(testName, function, args, startTime, "", "policy should be set to readwrite", err) return } + // List all buckets. function = "ListBuckets()" - function_all += ", " + function + functionAll += ", " + function args = nil buckets, err := c.ListBuckets() @@ -3320,7 +3607,7 @@ func testFunctional() { buf := bytes.Repeat([]byte("f"), 1<<19) function = "PutObject(bucketName, objectName, reader, contentType)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3363,7 +3650,7 @@ func testFunctional() { isRecursive := true // Recursive is true. function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3384,7 +3671,7 @@ func testFunctional() { objFound = false isRecursive = true // Recursive is true. 
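
The listing assertions in this test use minio-go's channel-based listing, where closing doneCh tears down the background lister. The pattern in isolation, again with placeholder names and an assumed client c (imports elided):

	doneCh := make(chan struct{})
	defer close(doneCh) // stops the listing goroutine when we return

	// Recursive listing under a prefix; per-entry errors arrive in-band.
	for object := range c.ListObjects("mybucket", "prefix/", true, doneCh) {
		if object.Err != nil {
			log.Fatalln(object.Err)
		}
		log.Println(object.Key, object.Size)
	}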
function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3405,7 +3692,7 @@ func testFunctional() { incompObjNotFound := true function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3424,7 +3711,7 @@ func testFunctional() { } function = "GetObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3446,9 +3733,10 @@ func testFunctional() { logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) return } + newReader.Close() function = "FGetObject(bucketName, objectName, fileName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3462,7 +3750,7 @@ func testFunctional() { } function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": "", @@ -3475,7 +3763,7 @@ func testFunctional() { // Generate presigned HEAD object url. function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3504,7 +3792,7 @@ func testFunctional() { resp.Body.Close() function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": "", @@ -3518,7 +3806,7 @@ func testFunctional() { // Generate presigned GET object url. 
function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3592,7 +3880,7 @@ func testFunctional() { } function = "PresignedPutObject(bucketName, objectName, expires)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": "", @@ -3605,7 +3893,7 @@ func testFunctional() { } function = "PresignedPutObject(bucketName, objectName, expires)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", @@ -3656,7 +3944,7 @@ func testFunctional() { } function = "RemoveObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3692,7 +3980,7 @@ func testFunctional() { } function = "RemoveBucket(bucketName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } @@ -3720,7 +4008,7 @@ func testFunctional() { logError(testName, function, args, startTime, "", "File Remove failed", err) return } - successLogger(testName, function_all, args, startTime).Info() + successLogger(testName, functionAll, args, startTime).Info() } // Test for validating GetObject Reader* methods functioning when the @@ -3916,6 +4204,7 @@ func testPutObjectUploadSeekedObject() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer obj.Close() n, err = obj.Seek(int64(offset), 0) if err != nil { @@ -4110,89 +4399,6 @@ func testGetObjectClosedTwiceV2() { successLogger(testName, function, args, startTime).Info() } -// Tests removing partially uploaded objects. -func testRemovePartiallyUploadedV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveIncompleteUpload(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024)) - - reader, writer := io.Pipe() - go func() { - i := 0 - for i < 25 { - _, cerr := io.CopyN(writer, r, 128*1024) - if cerr != nil { - logError(testName, function, args, startTime, "", "Copy failed", cerr) - return - } - i++ - r.Seek(0, 0) - } - writer.CloseWithError(errors.New("proactively closed to be verified later")) - }() - - objectName := bucketName + "-resumable" - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject should fail", err) - return - } - if err.Error() != "proactively closed to be verified later" { - logError(testName, function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err) - return - } - err = c.RemoveIncompleteUpload(bucketName, objectName) - if err != nil { - logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - // Tests FPutObject hidden contentType setting func testFPutObjectV2() { // initialize logging params @@ -4504,6 +4710,7 @@ func testGetObjectReadSeekFunctionalV2() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer r.Close() st, err := r.Stat() if err != nil { @@ -4667,6 +4874,7 @@ func testGetObjectReadAtFunctionalV2() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer r.Close() st, err := r.Stat() if err != nil { @@ -4839,6 +5047,7 @@ func testCopyObjectV2() { logError(testName, function, args, startTime, "", "Stat failed", err) return } + r.Close() // Copy Source src := minio.NewSourceInfo(bucketName, objectName, nil) @@ -4921,6 +5130,10 @@ func testCopyObjectV2() { return } + // Close all the readers. + r.Close() + readerCopy.Close() + // CopyObject again but with wrong conditions src = minio.NewSourceInfo(bucketName, objectName, nil) err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) @@ -5065,86 +5278,186 @@ func testComposeMultipleSources(c *minio.Client) { // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") + err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // We will append 10 copies of the object. 
+ srcs := []minio.SourceInfo{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) + } + // make the last part very small + err = srcs[9].SetRange(0, 0) + if err != nil { + logError(testName, function, args, startTime, "", "SetRange failed", err) + return + } + args["sourceList"] = srcs + + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) + args["destination"] = dst + + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + err = c.ComposeObject(dst, srcs) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objProps.Size != 9*srcSize+1 { + logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test concatenating multiple objects objects +func testCompose10KSourcesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + return + } + + testComposeMultipleSources(c) +} + +func testEncryptedEmptyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - // Upload a small source object - const srcSize = 1024 * 1024 * 5 - buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) + + // 1. 
create an sse-c encrypted object to copy by uploading + const srcSize = 0 + var buf []byte // Empty buffer + args["objectName"] = "object" + _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) + logError(testName, function, args, startTime, "", "PutObject call failed", err) return } - // We will append 10 copies of the object. - srcs := []minio.SourceInfo{} - for i := 0; i < 10; i++ { - srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) - } - // make the last part very small - err = srcs[9].SetRange(0, 0) + // 2. Test CopyObject for an empty object + dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil) if err != nil { - logError(testName, function, args, startTime, "", "SetRange failed", err) + args["objectName"] = "new-object" + function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)" + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + srcInfo := minio.NewSourceInfo(bucketName, "object", sse) + if err = c.CopyObject(dstInfo, srcInfo); err != nil { + function = "CopyObject(dstInfo, srcInfo)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) return } - args["sourceList"] = srcs - - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) - args["destination"] = dst + // 3. Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil) if err != nil { + args["objectName"] = "new-object" + function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)" logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) return } - err = c.ComposeObject(dst, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) + + srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse) + if err = c.CopyObject(dstInfo, srcInfo); err != nil { + function = "CopyObject(dstInfo, srcInfo)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) return } - objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) + // 4. Download the object. + reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) + logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer reader.Close() - if objProps.Size != 9*srcSize+1 { - logError(testName, function, args, startTime, "", "Size mismatched! 
Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) return } // Delete all objects and buckets + delete(args, "objectName") if err = cleanupBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) return } - successLogger(testName, function, args, startTime).Info() -} - -// Test concatenating multiple objects objects -func testCompose10KSourcesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) - return - } - testComposeMultipleSources(c) + successLogger(testName, function, args, startTime).Info() } func testEncryptedCopyObjectWrapper(c *minio.Client) { @@ -5163,26 +5476,24 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { return } - key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256") - key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256") + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) // 1. create an sse-c encrypted object to copy by uploading const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(map[string]string) - for k, v := range key1.GetSSEHeaders() { - metadata[k] = v - } - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil}) + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) return } // 2. copy object and change encryption key - src := minio.NewSourceInfo(bucketName, "srcObject", &key1) + src := minio.NewSourceInfo(bucketName, "srcObject", sseSrc) args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil) + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil) if err != nil { logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) return @@ -5196,17 +5507,12 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { } // 3. 
get copied object and check if content is equal - opts := minio.GetObjectOptions{} - for k, v := range key2.GetSSEHeaders() { - opts.Set(k, v) - } coreClient := minio.Core{c} - reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts) + reader, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: sseDst}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } - defer reader.Close() decBytes, err := ioutil.ReadAll(reader) if err != nil { @@ -5217,6 +5523,75 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) return } + reader.Close() + + // Test key rotation for source object in-place. + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key + dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied object and check if content is equal + reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test in-place decryption. 
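+ // The block below exercises decryption via server-side copy: copying from
+ // an SSE-C source to a destination created with a nil ServerSide value
+ // rewrites the object in plaintext. A minimal sketch of the pattern, using
+ // only APIs already exercised in this test (identifiers are illustrative):
+ //
+ //   src := minio.NewSourceInfo(bucket, object, sse)                // SSE-C encrypted source
+ //   dst, err := minio.NewDestinationInfo(bucket, object, nil, nil) // nil SSE => plaintext destination
+ //   if err == nil {
+ //       err = c.CopyObject(dst, src) // object is now readable without customer keys
+ //   }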
+
+ dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+ args["destination"] = dst
+
+ src = minio.NewSourceInfo(bucketName, "srcObject", newSSE)
+ args["source"] = src
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Get copied decrypted object and check if content is equal
+ reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err = ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+
 // Delete all objects and buckets
 if err = cleanupBucket(bucketName, c); err != nil {
 logError(testName, function, args, startTime, "", "Cleanup failed", err)
 return
@@ -5270,9 +5645,64 @@ func testEncryptedCopyObjectV2() {
 return
 }
 
+ // c.TraceOn(os.Stderr)
 testEncryptedCopyObjectWrapper(c)
 }
 
+func testDecryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
+ if err = c.MakeBucket(bucketName, "us-east-1"); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
+ _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
+ ServerSideEncryption: encryption,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption))
+ args["source"] = src
+ dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+ args["destination"] = dst
+
+ if err = c.CopyObject(dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+ if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
 func testUserMetadataCopying() {
 // initialize logging params
 startTime := time.Now()
@@ -5990,7 +6420,7 @@ func testFunctionalV2() {
 startTime := time.Now()
 testName := getFuncName()
 function := "testFunctionalV2()"
- function_all := ""
+ functionAll := ""
 args := map[string]interface{}{}
 
 // Seed
random based on current time.
@@ -6018,7 +6448,7 @@ func testFunctionalV2() {
 location := "us-east-1"
 // Make a new bucket.
 function = "MakeBucket(bucketName, location)"
- function_all = "MakeBucket(bucketName, location)"
+ functionAll = "MakeBucket(bucketName, location)"
 args = map[string]interface{}{
 "bucketName": bucketName,
 "location": location,
@@ -6049,7 +6479,7 @@ func testFunctionalV2() {
 // Verify if bucket exists and you have access.
 var exists bool
 function = "BucketExists(bucketName)"
- function_all += ", " + function
+ functionAll += ", " + function
 args = map[string]interface{}{
 "bucketName": bucketName,
 }
@@ -6064,14 +6494,17 @@ func testFunctionalV2() {
 }
 
 // Make the bucket 'public read/write'.
- function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
- function_all += ", " + function
+ function = "SetBucketPolicy(bucketName, bucketPolicy)"
+ functionAll += ", " + function
+
+ readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads","s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `/*"],"Sid": ""}]}`
+
 args = map[string]interface{}{
 "bucketName": bucketName,
- "objectPrefix": "",
- "bucketPolicy": policy.BucketPolicyReadWrite,
+ "bucketPolicy": readWritePolicy,
 }
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ err = c.SetBucketPolicy(bucketName, readWritePolicy)
+
 if err != nil {
 logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
 return
@@ -6079,7 +6512,7 @@ func testFunctionalV2() {
 
 // List all buckets.
 function = "ListBuckets()"
- function_all += ", " + function
+ functionAll += ", " + function
 args = nil
 buckets, err := c.ListBuckets()
 if len(buckets) == 0 {
@@ -6145,7 +6578,7 @@ func testFunctionalV2() {
 objFound := false
 isRecursive := true // Recursive is true.
 function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
 args = map[string]interface{}{
 "bucketName": bucketName,
 "objectName": objectName,
@@ -6164,7 +6597,7 @@ func testFunctionalV2() {
 
 incompObjNotFound := true
 function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
 args = map[string]interface{}{
 "bucketName": bucketName,
 "objectName": objectName,
@@ -6182,7 +6615,7 @@ func testFunctionalV2() {
 }
 
 function = "GetObject(bucketName, objectName)"
- function_all += ", " + function
+ functionAll += ", " + function
 args = map[string]interface{}{
 "bucketName": bucketName,
 "objectName": objectName,
@@ -6198,6 +6631,7 @@ func testFunctionalV2() {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
 }
+ newReader.Close()
 
 if !bytes.Equal(newReadBytes, buf) {
 logError(testName, function, args, startTime, "", "Bytes mismatch", err)
@@ -6205,7 +6639,7 @@ func testFunctionalV2() {
 }
 
 function = "FGetObject(bucketName, objectName, fileName)"
- function_all += ", " + function
+ functionAll += ", " + function
 args = map[string]interface{}{
 "bucketName": bucketName,
 "objectName": objectName,
@@ -6219,7 +6653,7 @@ func testFunctionalV2() {
 
 // Generate presigned HEAD object url.
function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6248,7 +6682,7 @@ func testFunctionalV2() { // Generate presigned GET object url. function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6316,7 +6750,7 @@ func testFunctionalV2() { } function = "PresignedPutObject(bucketName, objectName, expires)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", @@ -6350,7 +6784,7 @@ func testFunctionalV2() { } function = "GetObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", @@ -6366,6 +6800,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "ReadAll failed", err) return } + newReader.Close() if !bytes.Equal(newReadBytes, buf) { logError(testName, function, args, startTime, "", "Bytes mismatch", err) @@ -6386,7 +6821,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "File removes failed", err) return } - successLogger(testName, function_all, args, startTime).Info() + successLogger(testName, functionAll, args, startTime).Info() } // Test get object with GetObjectWithContext @@ -6454,10 +6889,12 @@ func testGetObjectWithContext() { logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) return } + if _, err = r.Stat(); err == nil { logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) return } + r.Close() ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) args["ctx"] = ctx @@ -6736,6 +7173,7 @@ func testGetObjectWithContextV2() { logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) return } + r.Close() ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) defer cancel() @@ -6865,6 +7303,120 @@ func testFGetObjectWithContextV2() { } +// Test list object v1 and V2 storage class fields +func testListObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName1 := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ _, err = c.PutObject(bucketName, objectName1, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject1 call failed", err)
+ return
+ }
+
+ bufSize1 := dataFileMap["datafile-33-kB"]
+ var reader1 = getDataReader("datafile-33-kB")
+ defer reader1.Close()
+ objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ _, err = c.PutObject(bucketName, objectName2, reader1, int64(bufSize1), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject2 call failed", err)
+ return
+ }
+
+ // Create a done channel to control 'ListObjects' goroutine.
+ doneCh := make(chan struct{})
+ // Exit cleanly upon return.
+ defer close(doneCh)
+
+ // check for storage-class from ListObjects result
+ for objInfo := range c.ListObjects(bucketName, "", true, doneCh) {
+ if objInfo.Err != nil {
+ logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
+ return
+ }
+ if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" {
+ logError(testName, function, args, startTime, "", "ListObjects doesn't return expected storage class", err)
+ return
+ }
+ if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" {
+ logError(testName, function, args, startTime, "", "ListObjects doesn't return expected storage class", err)
+ return
+ }
+ }
+
+ // check for storage-class from ListObjectsV2 result
+ for objInfo := range c.ListObjectsV2(bucketName, "", true, doneCh) {
+ if objInfo.Err != nil {
+ logError(testName, function, args, startTime, "", "ListObjectsV2 failed unexpectedly", objInfo.Err)
+ return
+ }
+ if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" {
+ logError(testName, function, args, startTime, "", "ListObjectsV2 doesn't return expected storage class", err)
+ return
+ }
+ if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" {
+ logError(testName, function, args, startTime, "", "ListObjectsV2 doesn't return expected storage class", err)
+ return
+ }
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
 // Convert string to bool and always return false if any error
 func mustParseBool(str string) bool {
 b, err := strconv.ParseBool(str)
@@ -6889,7 +7441,6 @@ func main() {
 if isFullMode() {
 testMakeBucketErrorV2()
 testGetObjectClosedTwiceV2()
- testRemovePartiallyUploadedV2()
 testFPutObjectV2()
 testMakeBucketRegionsV2()
 testGetObjectReadSeekFunctionalV2()
@@ -6911,19 +7462,15 @@ func main() {
 testPutObjectWithMetadata()
 testPutObjectReadAt()
 testPutObjectStreaming()
- testListPartiallyUploaded()
 testGetObjectSeekEnd()
 testGetObjectClosedTwice()
 testRemoveMultipleObjects()
- testRemovePartiallyUploaded()
 testFPutObjectMultipart()
 testFPutObject()
 testGetObjectReadSeekFunctional()
testGetObjectReadAtFunctional() testPresignedPostPolicy() testCopyObject() - testEncryptionPutGet() - testEncryptionFPut() testComposeObjectErrorCases() testCompose10KSources() testUserMetadataCopying() @@ -6938,11 +7485,19 @@ func main() { testStorageClassMetadataPutObject() testStorageClassInvalidMetadataPutObject() testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() // SSE-C tests will only work over TLS connection. if tls { + testEncryptionPutGet() + testEncryptionFPut() + testEncryptedGetObjectReadAtFunctional() + testEncryptedGetObjectReadSeekFunctional() testEncryptedCopyObjectV2() testEncryptedCopyObject() + testEncryptedEmptyObject() + testDecryptedCopyObject() } } else { testFunctional() diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go deleted file mode 100644 index b0f2d6e08..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/base64" - "errors" - "io" -) - -// Crypt mode - encryption or decryption -type cryptMode int - -const ( - encryptMode cryptMode = iota - decryptMode -) - -// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm -type CBCSecureMaterials struct { - - // Data stream to encrypt/decrypt - stream io.Reader - - // Last internal error - err error - - // End of file reached - eof bool - - // Holds initial data - srcBuf *bytes.Buffer - - // Holds transformed data (encrypted or decrypted) - dstBuf *bytes.Buffer - - // Encryption algorithm - encryptionKey Key - - // Key to encrypts/decrypts data - contentKey []byte - - // Encrypted form of contentKey - cryptedKey []byte - - // Initialization vector - iv []byte - - // matDesc - currently unused - matDesc []byte - - // Indicate if we are going to encrypt or decrypt - cryptMode cryptMode - - // Helper that encrypts/decrypts data - blockMode cipher.BlockMode -} - -// NewCBCSecureMaterials builds new CBC crypter module with -// the specified encryption key (symmetric or asymmetric) -func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) { - if key == nil { - return nil, errors.New("Unable to recognize empty encryption properties") - } - return &CBCSecureMaterials{ - srcBuf: bytes.NewBuffer([]byte{}), - dstBuf: bytes.NewBuffer([]byte{}), - encryptionKey: key, - matDesc: []byte("{}"), - }, nil - -} - -// Close implements closes the internal stream. 
-func (s *CBCSecureMaterials) Close() error { - closer, ok := s.stream.(io.Closer) - if ok { - return closer.Close() - } - return nil -} - -// SetupEncryptMode - tells CBC that we are going to encrypt data -func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { - // Set mode to encrypt - s.cryptMode = encryptMode - - // Set underlying reader - s.stream = stream - - s.eof = false - s.srcBuf.Reset() - s.dstBuf.Reset() - - var err error - - // Generate random content key - s.contentKey = make([]byte, aes.BlockSize*2) - if _, err := rand.Read(s.contentKey); err != nil { - return err - } - // Encrypt content key - s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey) - if err != nil { - return err - } - // Generate random IV - s.iv = make([]byte, aes.BlockSize) - if _, err = rand.Read(s.iv); err != nil { - return err - } - // New cipher - encryptContentBlock, err := aes.NewCipher(s.contentKey) - if err != nil { - return err - } - - s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv) - - return nil -} - -// SetupDecryptMode - tells CBC that we are going to decrypt data -func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error { - // Set mode to decrypt - s.cryptMode = decryptMode - - // Set underlying reader - s.stream = stream - - // Reset - s.eof = false - s.srcBuf.Reset() - s.dstBuf.Reset() - - var err error - - // Get IV - s.iv, err = base64.StdEncoding.DecodeString(iv) - if err != nil { - return err - } - - // Get encrypted content key - s.cryptedKey, err = base64.StdEncoding.DecodeString(key) - if err != nil { - return err - } - - // Decrypt content key - s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey) - if err != nil { - return err - } - - // New cipher - decryptContentBlock, err := aes.NewCipher(s.contentKey) - if err != nil { - return err - } - - s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv) - return nil -} - -// GetIV - return randomly generated IV (per S3 object), base64 encoded. -func (s *CBCSecureMaterials) GetIV() string { - return base64.StdEncoding.EncodeToString(s.iv) -} - -// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded. -func (s *CBCSecureMaterials) GetKey() string { - return base64.StdEncoding.EncodeToString(s.cryptedKey) -} - -// GetDesc - user provided encryption material description in JSON (UTF8) format. 
-func (s *CBCSecureMaterials) GetDesc() string { - return string(s.matDesc) -} - -// Fill buf with encrypted/decrypted data -func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) { - // Always fill buf from bufChunk at the end of this function - defer func() { - if s.err != nil { - n, err = 0, s.err - } else { - n, err = s.dstBuf.Read(buf) - } - }() - - // Return - if s.eof { - return - } - - // Fill dest buffer if its length is less than buf - for !s.eof && s.dstBuf.Len() < len(buf) { - - srcPart := make([]byte, aes.BlockSize) - dstPart := make([]byte, aes.BlockSize) - - // Fill src buffer - for s.srcBuf.Len() < aes.BlockSize*2 { - _, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize) - if err != nil { - break - } - } - - // Quit immediately for errors other than io.EOF - if err != nil && err != io.EOF { - s.err = err - return - } - - // Mark current encrypting/decrypting as finished - s.eof = (err == io.EOF) - - if s.eof && s.cryptMode == encryptMode { - if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil { - s.err = err - return - } - } else { - _, _ = s.srcBuf.Read(srcPart) - } - - // Crypt srcPart content - for len(srcPart) > 0 { - - // Crypt current part - s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize]) - - // Unpad when this is the last part and we are decrypting - if s.eof && s.cryptMode == decryptMode { - dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize) - if err != nil { - s.err = err - return - } - } - - // Send crypted data to dstBuf - if _, wErr := s.dstBuf.Write(dstPart); wErr != nil { - s.err = wErr - return - } - // Move to the next part - srcPart = srcPart[aes.BlockSize:] - } - } - return -} - -// Unpad a set of bytes following PKCS5 algorithm -func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) { - len := len(buf) - if len == 0 { - return nil, errors.New("buffer is empty") - } - pad := int(buf[len-1]) - if pad > len || pad > blockSize { - return nil, errors.New("invalid padding size") - } - return buf[:len-pad], nil -} - -// Pad a set of bytes following PKCS5 algorithm -func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) { - len := len(buf) - pad := blockSize - (len % blockSize) - padText := bytes.Repeat([]byte{byte(pad)}, pad) - return append(buf, padText...), nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go deleted file mode 100644 index 482922ab7..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package encrypt implements a generic interface to encrypt any stream of data. -// currently this package implements two types of encryption -// - Symmetric encryption using AES. -// - Asymmetric encrytion using RSA. -package encrypt - -import "io" - -// Materials - provides generic interface to encrypt any stream of data. 
-type Materials interface { - - // Closes the wrapped stream properly, initiated by the caller. - Close() error - - // Returns encrypted/decrypted data, io.Reader compatible. - Read(b []byte) (int, error) - - // Get randomly generated IV, base64 encoded. - GetIV() (iv string) - - // Get content encrypting key (cek) in encrypted form, base64 encoded. - GetKey() (key string) - - // Get user provided encryption material description in - // JSON (UTF8) format. This is not used, kept for future. - GetDesc() (desc string) - - // Setup encrypt mode, further calls of Read() function - // will return the encrypted form of data streamed - // by the passed reader - SetupEncryptMode(stream io.Reader) error - - // Setup decrypted mode, further calls of Read() function - // will return the decrypted form of data streamed - // by the passed reader - SetupDecryptMode(stream io.Reader, iv string, key string) error -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go deleted file mode 100644 index 0ed95f5ff..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "crypto/aes" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "errors" -) - -// Key - generic interface to encrypt/decrypt a key. -// We use it to encrypt/decrypt content key which is the key -// that encrypt/decrypt object data. -type Key interface { - // Encrypt data using to the set encryption key - Encrypt([]byte) ([]byte, error) - // Decrypt data using to the set encryption key - Decrypt([]byte) ([]byte, error) -} - -// SymmetricKey - encrypts data with a symmetric master key -type SymmetricKey struct { - masterKey []byte -} - -// Encrypt passed bytes -func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) { - // Initialize an AES encryptor using a master key - keyBlock, err := aes.NewCipher(s.masterKey) - if err != nil { - return []byte{}, err - } - - // Pad the key before encryption - plain, _ = pkcs5Pad(plain, aes.BlockSize) - - encKey := []byte{} - encPart := make([]byte, aes.BlockSize) - - // Encrypt the passed key by block - for { - if len(plain) < aes.BlockSize { - break - } - // Encrypt the passed key - keyBlock.Encrypt(encPart, plain[:aes.BlockSize]) - // Add the encrypted block to the total encrypted key - encKey = append(encKey, encPart...) 
- // Pass to the next plain block - plain = plain[aes.BlockSize:] - } - return encKey, nil -} - -// Decrypt passed bytes -func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) { - // Initialize AES decrypter - keyBlock, err := aes.NewCipher(s.masterKey) - if err != nil { - return nil, err - } - - var plain []byte - plainPart := make([]byte, aes.BlockSize) - - // Decrypt the encrypted data block by block - for { - if len(cipher) < aes.BlockSize { - break - } - keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize]) - // Add the decrypted block to the total result - plain = append(plain, plainPart...) - // Pass to the next cipher block - cipher = cipher[aes.BlockSize:] - } - - // Unpad the resulted plain data - plain, err = pkcs5Unpad(plain, aes.BlockSize) - if err != nil { - return nil, err - } - - return plain, nil -} - -// NewSymmetricKey generates a new encrypt/decrypt crypto using -// an AES master key password -func NewSymmetricKey(b []byte) *SymmetricKey { - return &SymmetricKey{masterKey: b} -} - -// AsymmetricKey - struct which encrypts/decrypts data -// using RSA public/private certificates -type AsymmetricKey struct { - publicKey *rsa.PublicKey - privateKey *rsa.PrivateKey -} - -// Encrypt data using public key -func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) { - cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain) - if err != nil { - return nil, err - } - return cipher, nil -} - -// Decrypt data using public key -func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) { - cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher) - if err != nil { - return nil, err - } - return cipher, nil -} - -// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt -// data using a pair for private and public key -func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) { - // Parse private key from passed data - priv, err := x509.ParsePKCS8PrivateKey(privData) - if err != nil { - return nil, err - } - privKey, ok := priv.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("not a valid private key") - } - - // Parse public key from passed data - pub, err := x509.ParsePKIXPublicKey(pubData) - if err != nil { - return nil, err - } - - pubKey, ok := pub.(*rsa.PublicKey) - if !ok { - return nil, errors.New("not a valid public key") - } - - // Associate the private key with the passed public key - privKey.PublicKey = *pubKey - - return &AsymmetricKey{ - publicKey: pubKey, - privateKey: privKey, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go new file mode 100644 index 000000000..2d3c70f00 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go @@ -0,0 +1,195 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "net/http" + + "golang.org/x/crypto/argon2" +) + +const ( + // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. + sseGenericHeader = "X-Amz-Server-Side-Encryption" + + // sseKmsKeyID is the AWS SSE-KMS key id. + sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" + // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. + sseEncryptionContext = sseGenericHeader + "-Encryption-Context" + + // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" + // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. + sseCustomerKey = sseGenericHeader + "-Customer-Key" + // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" + + // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). +var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. +// Using SSE-S3 the server will encrypt the object with server-managed keys. +func NewSSE() ServerSide { return s3{} } + +// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. 
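+//
+// A hypothetical usage sketch (the key ID, bucket, and object names below are
+// placeholders, not values this API requires):
+//
+//	sse, err := encrypt.NewSSEKMS("alias/my-key", nil) // a nil context is allowed
+//	if err != nil {
+//		return err
+//	}
+//	_, err = client.PutObject("my-bucket", "my-object", reader, size,
+//		minio.PutObjectOptions{ServerSideEncryption: sse})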
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+ if context == nil {
+ return kms{key: keyID, hasContext: false}, nil
+ }
+ serializedContext, err := json.Marshal(context)
+ if err != nil {
+ return nil, err
+ }
+ return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+ if len(key) != 32 {
+ return nil, errors.New("encrypt: SSE-C key must be 256 bits long")
+ }
+ sse := ssec{}
+ copy(sse[:], key)
+ return sse, nil
+}
+
+// SSE transforms an SSE-C copy encryption into an SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssecCopy); ok {
+ return ssec(sse)
+ }
+ return sse
+}
+
+// SSECopy transforms an SSE-C encryption into an SSE-C copy
+// encryption. This is required for SSE-C key rotation or an SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssec); ok {
+ return ssecCopy(sse)
+ }
+ return sse
+}
+
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(sseCustomerAlgorithm, "AES256")
+ h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(sseCopyCustomerAlgorithm, "AES256")
+ h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
+
+type kms struct {
+ key string
+ context []byte
+ hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+ h.Set(sseGenericHeader, "aws:kms")
+ h.Set(sseKmsKeyID, s.key)
+ if s.hasContext {
+ h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
deleted file mode 100644
index 737b810ac..000000000
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import "github.com/minio/minio-go/pkg/set" - -// ConditionKeyMap - map of policy condition key and value. -type ConditionKeyMap map[string]set.StringSet - -// Add - adds key and value. The value is appended If key already exists. -func (ckm ConditionKeyMap) Add(key string, value set.StringSet) { - if v, ok := ckm[key]; ok { - ckm[key] = v.Union(value) - } else { - ckm[key] = set.CopyStringSet(value) - } -} - -// Remove - removes value of given key. If key has empty after removal, the key is also removed. -func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) { - if v, ok := ckm[key]; ok { - if value != nil { - ckm[key] = v.Difference(value) - } - - if ckm[key].IsEmpty() { - delete(ckm, key) - } - } -} - -// RemoveKey - removes key and its value. -func (ckm ConditionKeyMap) RemoveKey(key string) { - if _, ok := ckm[key]; ok { - delete(ckm, key) - } -} - -// CopyConditionKeyMap - returns new copy of given ConditionKeyMap. -func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap { - out := make(ConditionKeyMap) - - for k, v := range condKeyMap { - out[k] = set.CopyStringSet(v) - } - - return out -} - -// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap. -func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap { - out := CopyConditionKeyMap(condKeyMap1) - - for k, v := range condKeyMap2 { - if ev, ok := out[k]; ok { - out[k] = ev.Union(v) - } else { - out[k] = set.CopyStringSet(v) - } - } - - return out -} - -// ConditionMap - map of condition and conditional values. -type ConditionMap map[string]ConditionKeyMap - -// Add - adds condition key and condition value. The value is appended if key already exists. -func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) { - if v, ok := cond[condKey]; ok { - cond[condKey] = mergeConditionKeyMap(v, condKeyMap) - } else { - cond[condKey] = CopyConditionKeyMap(condKeyMap) - } -} - -// Remove - removes condition key and its value. -func (cond ConditionMap) Remove(condKey string) { - if _, ok := cond[condKey]; ok { - delete(cond, condKey) - } -} - -// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap. -func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap { - out := make(ConditionMap) - - for k, v := range condMap1 { - out[k] = CopyConditionKeyMap(v) - } - - for k, v := range condMap2 { - if ev, ok := out[k]; ok { - out[k] = mergeConditionKeyMap(ev, v) - } else { - out[k] = CopyConditionKeyMap(v) - } - } - - return out -} diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go deleted file mode 100644 index 9dda99efc..000000000 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ /dev/null @@ -1,635 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "reflect" - "strings" - - "github.com/minio/minio-go/pkg/set" -) - -// BucketPolicy - Bucket level policy. -type BucketPolicy string - -// Different types of Policies currently supported for buckets. -const ( - BucketPolicyNone BucketPolicy = "none" - BucketPolicyReadOnly = "readonly" - BucketPolicyReadWrite = "readwrite" - BucketPolicyWriteOnly = "writeonly" -) - -// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise. -func (p BucketPolicy) IsValidBucketPolicy() bool { - switch p { - case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly: - return true - } - return false -} - -// Resource prefix for all aws resources. -const awsResourcePrefix = "arn:aws:s3:::" - -// Common bucket actions for both read and write policies. -var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation") - -// Read only bucket actions. -var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket") - -// Write only bucket actions. -var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads") - -// Read only object actions. -var readOnlyObjectActions = set.CreateStringSet("s3:GetObject") - -// Write only object actions. -var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject") - -// Read and write object actions. -var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions) - -// All valid bucket and object actions. -var validActions = commonBucketActions. - Union(readOnlyBucketActions). - Union(writeOnlyBucketActions). - Union(readOnlyObjectActions). - Union(writeOnlyObjectActions) - -var startsWithFunc = func(resource string, resourcePrefix string) bool { - return strings.HasPrefix(resource, resourcePrefix) -} - -// User - canonical users list. -type User struct { - AWS set.StringSet `json:"AWS,omitempty"` - CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"` -} - -// Statement - minio policy statement -type Statement struct { - Actions set.StringSet `json:"Action"` - Conditions ConditionMap `json:"Condition,omitempty"` - Effect string - Principal User `json:"Principal"` - Resources set.StringSet `json:"Resource"` - Sid string -} - -// BucketAccessPolicy - minio policy collection -type BucketAccessPolicy struct { - Version string // date in YYYY-MM-DD format - Statements []Statement `json:"Statement"` -} - -// isValidStatement - returns whether given statement is valid to process for given bucket name. 
-func isValidStatement(statement Statement, bucketName string) bool { - if statement.Actions.Intersection(validActions).IsEmpty() { - return false - } - - if statement.Effect != "Allow" { - return false - } - - if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") { - return false - } - - bucketResource := awsResourcePrefix + bucketName - if statement.Resources.Contains(bucketResource) { - return true - } - - if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() { - return false - } - - return true -} - -// Returns new statements with bucket actions for given policy. -func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) { - statements = []Statement{} - if policy == BucketPolicyNone || bucketName == "" { - return statements - } - - bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName) - - statement := Statement{ - Actions: commonBucketActions, - Effect: "Allow", - Principal: User{AWS: set.CreateStringSet("*")}, - Resources: bucketResource, - Sid: "", - } - statements = append(statements, statement) - - if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite { - statement = Statement{ - Actions: readOnlyBucketActions, - Effect: "Allow", - Principal: User{AWS: set.CreateStringSet("*")}, - Resources: bucketResource, - Sid: "", - } - if prefix != "" { - condKeyMap := make(ConditionKeyMap) - condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix)) - condMap := make(ConditionMap) - condMap.Add("StringEquals", condKeyMap) - statement.Conditions = condMap - } - statements = append(statements, statement) - } - - if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite { - statement = Statement{ - Actions: writeOnlyBucketActions, - Effect: "Allow", - Principal: User{AWS: set.CreateStringSet("*")}, - Resources: bucketResource, - Sid: "", - } - statements = append(statements, statement) - } - - return statements -} - -// Returns new statements contains object actions for given policy. -func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) { - statements = []Statement{} - if policy == BucketPolicyNone || bucketName == "" { - return statements - } - - statement := Statement{ - Effect: "Allow", - Principal: User{AWS: set.CreateStringSet("*")}, - Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"), - Sid: "", - } - - if policy == BucketPolicyReadOnly { - statement.Actions = readOnlyObjectActions - } else if policy == BucketPolicyWriteOnly { - statement.Actions = writeOnlyObjectActions - } else if policy == BucketPolicyReadWrite { - statement.Actions = readWriteObjectActions - } - - statements = append(statements, statement) - return statements -} - -// Returns new statements for given policy, bucket and prefix. -func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) { - statements = []Statement{} - ns := newBucketStatement(policy, bucketName, prefix) - statements = append(statements, ns...) - - ns = newObjectStatement(policy, bucketName, prefix) - statements = append(statements, ns...) - - return statements -} - -// Returns whether given bucket statements are used by other than given prefix statements. 
-func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) { - resourcePrefix := awsResourcePrefix + bucketName + "/" - objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" - - for _, s := range statements { - if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() { - if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) { - readOnlyInUse = true - } - - if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) { - writeOnlyInUse = true - } - } - if readOnlyInUse && writeOnlyInUse { - break - } - } - - return readOnlyInUse, writeOnlyInUse -} - -// Removes object actions in given statement. -func removeObjectActions(statement Statement, objectResource string) Statement { - if statement.Conditions == nil { - if len(statement.Resources) > 1 { - statement.Resources.Remove(objectResource) - } else { - statement.Actions = statement.Actions.Difference(readOnlyObjectActions) - statement.Actions = statement.Actions.Difference(writeOnlyObjectActions) - } - } - - return statement -} - -// Removes bucket actions for given policy in given statement. -func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement { - removeReadOnly := func() { - if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) { - return - } - - if statement.Conditions == nil { - statement.Actions = statement.Actions.Difference(readOnlyBucketActions) - return - } - - if prefix != "" { - stringEqualsValue := statement.Conditions["StringEquals"] - values := set.NewStringSet() - if stringEqualsValue != nil { - values = stringEqualsValue["s3:prefix"] - if values == nil { - values = set.NewStringSet() - } - } - - values.Remove(prefix) - - if stringEqualsValue != nil { - if values.IsEmpty() { - delete(stringEqualsValue, "s3:prefix") - } - if len(stringEqualsValue) == 0 { - delete(statement.Conditions, "StringEquals") - } - } - - if len(statement.Conditions) == 0 { - statement.Conditions = nil - statement.Actions = statement.Actions.Difference(readOnlyBucketActions) - } - } - } - - removeWriteOnly := func() { - if statement.Conditions == nil { - statement.Actions = statement.Actions.Difference(writeOnlyBucketActions) - } - } - - if len(statement.Resources) > 1 { - statement.Resources.Remove(bucketResource) - } else { - if !readOnlyInUse { - removeReadOnly() - } - - if !writeOnlyInUse { - removeWriteOnly() - } - } - - return statement -} - -// Returns statements containing removed actions/statements for given -// policy, bucket name and prefix. 
-func removeStatements(statements []Statement, bucketName string, prefix string) []Statement { - bucketResource := awsResourcePrefix + bucketName - objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" - readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix) - - out := []Statement{} - readOnlyBucketStatements := []Statement{} - s3PrefixValues := set.NewStringSet() - - for _, statement := range statements { - if !isValidStatement(statement, bucketName) { - out = append(out, statement) - continue - } - - if statement.Resources.Contains(bucketResource) { - if statement.Conditions != nil { - statement = removeBucketActions(statement, prefix, bucketResource, false, false) - } else { - statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse) - } - } else if statement.Resources.Contains(objectResource) { - statement = removeObjectActions(statement, objectResource) - } - - if !statement.Actions.IsEmpty() { - if statement.Resources.Contains(bucketResource) && - statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) && - statement.Effect == "Allow" && - statement.Principal.AWS.Contains("*") { - - if statement.Conditions != nil { - stringEqualsValue := statement.Conditions["StringEquals"] - values := set.NewStringSet() - if stringEqualsValue != nil { - values = stringEqualsValue["s3:prefix"] - if values == nil { - values = set.NewStringSet() - } - } - s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string { - return bucketResource + "/" + v + "*" - })) - } else if !s3PrefixValues.IsEmpty() { - readOnlyBucketStatements = append(readOnlyBucketStatements, statement) - continue - } - } - out = append(out, statement) - } - } - - skipBucketStatement := true - resourcePrefix := awsResourcePrefix + bucketName + "/" - for _, statement := range out { - if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() && - s3PrefixValues.Intersection(statement.Resources).IsEmpty() { - skipBucketStatement = false - break - } - } - - for _, statement := range readOnlyBucketStatements { - if skipBucketStatement && - statement.Resources.Contains(bucketResource) && - statement.Effect == "Allow" && - statement.Principal.AWS.Contains("*") && - statement.Conditions == nil { - continue - } - - out = append(out, statement) - } - - if len(out) == 1 { - statement := out[0] - if statement.Resources.Contains(bucketResource) && - statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) && - statement.Effect == "Allow" && - statement.Principal.AWS.Contains("*") && - statement.Conditions == nil { - out = []Statement{} - } - } - - return out -} - -// Appends given statement into statement list to have unique statements. -// - If statement already exists in statement list, it ignores. -// - If statement exists with different conditions, they are merged. -// - Else the statement is appended to statement list. 
-func appendStatement(statements []Statement, statement Statement) []Statement { - for i, s := range statements { - if s.Actions.Equals(statement.Actions) && - s.Effect == statement.Effect && - s.Principal.AWS.Equals(statement.Principal.AWS) && - reflect.DeepEqual(s.Conditions, statement.Conditions) { - statements[i].Resources = s.Resources.Union(statement.Resources) - return statements - } else if s.Resources.Equals(statement.Resources) && - s.Effect == statement.Effect && - s.Principal.AWS.Equals(statement.Principal.AWS) && - reflect.DeepEqual(s.Conditions, statement.Conditions) { - statements[i].Actions = s.Actions.Union(statement.Actions) - return statements - } - - if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) && - s.Actions.Intersection(statement.Actions).Equals(statement.Actions) && - s.Effect == statement.Effect && - s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) { - if reflect.DeepEqual(s.Conditions, statement.Conditions) { - return statements - } - if s.Conditions != nil && statement.Conditions != nil { - if s.Resources.Equals(statement.Resources) { - statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions) - return statements - } - } - } - } - - if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) { - return append(statements, statement) - } - - return statements -} - -// Appends two statement lists. -func appendStatements(statements []Statement, appendStatements []Statement) []Statement { - for _, s := range appendStatements { - statements = appendStatement(statements, s) - } - - return statements -} - -// Returns policy of given bucket statement. -func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) { - if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) { - return commonFound, readOnly, writeOnly - } - - if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) && - statement.Conditions == nil { - commonFound = true - } - - if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) && - statement.Conditions == nil { - writeOnly = true - } - - if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) { - if prefix != "" && statement.Conditions != nil { - if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok { - if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok { - if s3PrefixValues.Contains(prefix) { - readOnly = true - } - } - } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok { - if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok { - if !s3PrefixValues.Contains(prefix) { - readOnly = true - } - } - } - } else if prefix == "" && statement.Conditions == nil { - readOnly = true - } else if prefix != "" && statement.Conditions == nil { - readOnly = true - } - } - - return commonFound, readOnly, writeOnly -} - -// Returns policy of given object statement. 
-func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) { - if statement.Effect == "Allow" && - statement.Principal.AWS.Contains("*") && - statement.Conditions == nil { - if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) { - readOnly = true - } - if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) { - writeOnly = true - } - } - - return readOnly, writeOnly -} - -// GetPolicy - Returns policy of given bucket name, prefix in given statements. -func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy { - bucketResource := awsResourcePrefix + bucketName - objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" - - bucketCommonFound := false - bucketReadOnly := false - bucketWriteOnly := false - matchedResource := "" - objReadOnly := false - objWriteOnly := false - - for _, s := range statements { - matchedObjResources := set.NewStringSet() - if s.Resources.Contains(objectResource) { - matchedObjResources.Add(objectResource) - } else { - matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource) - } - - if !matchedObjResources.IsEmpty() { - readOnly, writeOnly := getObjectPolicy(s) - for resource := range matchedObjResources { - if len(matchedResource) < len(resource) { - objReadOnly = readOnly - objWriteOnly = writeOnly - matchedResource = resource - } else if len(matchedResource) == len(resource) { - objReadOnly = objReadOnly || readOnly - objWriteOnly = objWriteOnly || writeOnly - matchedResource = resource - } - } - } else if s.Resources.Contains(bucketResource) { - commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix) - bucketCommonFound = bucketCommonFound || commonFound - bucketReadOnly = bucketReadOnly || readOnly - bucketWriteOnly = bucketWriteOnly || writeOnly - } - } - - policy := BucketPolicyNone - if bucketCommonFound { - if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly { - policy = BucketPolicyReadWrite - } else if bucketReadOnly && objReadOnly { - policy = BucketPolicyReadOnly - } else if bucketWriteOnly && objWriteOnly { - policy = BucketPolicyWriteOnly - } - } - - return policy -} - -// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements. -func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy { - policyRules := map[string]BucketPolicy{} - objResources := set.NewStringSet() - // Search all resources related to objects policy - for _, s := range statements { - for r := range s.Resources { - if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") { - objResources.Add(r) - } - } - } - // Pretend that policy resource as an actual object and fetch its policy - for r := range objResources { - // Put trailing * if exists in asterisk - asterisk := "" - if strings.HasSuffix(r, "*") { - r = r[:len(r)-1] - asterisk = "*" - } - objectPath := r[len(awsResourcePrefix+bucketName)+1:] - p := GetPolicy(statements, bucketName, objectPath) - policyRules[bucketName+"/"+objectPath+asterisk] = p - } - return policyRules -} - -// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended. 
-func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement { - out := removeStatements(statements, bucketName, prefix) - // fmt.Println("out = ") - // printstatement(out) - ns := newStatements(policy, bucketName, prefix) - // fmt.Println("ns = ") - // printstatement(ns) - - rv := appendStatements(out, ns) - // fmt.Println("rv = ") - // printstatement(rv) - - return rv -} - -// Match function matches wild cards in 'pattern' for resource. -func resourceMatch(pattern, resource string) bool { - if pattern == "" { - return resource == pattern - } - if pattern == "*" { - return true - } - parts := strings.Split(pattern, "*") - if len(parts) == 1 { - return resource == pattern - } - tGlob := strings.HasSuffix(pattern, "*") - end := len(parts) - 1 - if !strings.HasPrefix(resource, parts[0]) { - return false - } - for i := 1; i < end; i++ { - if !strings.Contains(resource, parts[i]) { - return false - } - idx := strings.Index(resource, parts[i]) + len(parts[i]) - resource = resource[idx:] - } - return tGlob || strings.HasSuffix(resource, parts[end]) -} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go index 0b90c41f6..b4070938e 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -25,7 +25,6 @@ import ( "fmt" "net/http" "net/url" - "path/filepath" "sort" "strconv" "strings" @@ -40,21 +39,17 @@ const ( ) // Encode input URL path to URL encoded path. -func encodeURL2Path(req *http.Request) (path string) { - reqHost := getHostAddr(req) - // Encode URL path. - if isS3, _ := filepath.Match("*.s3*.amazonaws.com", reqHost); isS3 { - bucketName := reqHost[:strings.LastIndex(reqHost, ".s3")] - path = "/" + bucketName - path += req.URL.Path - path = s3utils.EncodePath(path) - return - } - if strings.HasSuffix(reqHost, ".storage.googleapis.com") { - path = "/" + strings.TrimSuffix(reqHost, ".storage.googleapis.com") - path += req.URL.Path - path = s3utils.EncodePath(path) - return +func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { + if virtualHost { + reqHost := getHostAddr(req) + dotPos := strings.Index(reqHost, ".") + if dotPos > -1 { + bucketName := reqHost[:dotPos] + path = "/" + bucketName + path += req.URL.Path + path = s3utils.EncodePath(path) + return + } } path = s3utils.EncodePath(req.URL.Path) return @@ -62,7 +57,7 @@ func encodeURL2Path(req *http.Request) (path string) { // PreSignV2 - presign the request in following style. // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. -func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -78,7 +73,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in } // Get presigned string to sign. 
- stringToSign := preStringToSignV2(req) + stringToSign := preStringToSignV2(req, virtualHost) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -132,7 +127,7 @@ func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { // CanonicalizedProtocolHeaders = // SignV2 sign the request before Do() (AWS Signature Version 2). -func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { +func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -147,7 +142,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request } // Calculate HMAC for secretAccessKey. - stringToSign := stringToSignV2(req) + stringToSign := stringToSignV2(req, virtualHost) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -172,14 +167,14 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request // Expires + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func preStringToSignV2(req http.Request) string { +func preStringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. writePreSignV2Headers(buf, req) // Write canonicalized protocol headers if any. writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req) + writeCanonicalizedResource(buf, req, virtualHost) return buf.String() } @@ -199,14 +194,14 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // Date + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func stringToSignV2(req http.Request) string { +func stringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. writeSignV2Headers(buf, req) // Write canonicalized protocol headers if any. writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req) + writeCanonicalizedResource(buf, req, virtualHost) return buf.String() } @@ -288,11 +283,11 @@ var resourceList = []string{ // CanonicalizedResource = [ "/" + Bucket ] + // + // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) { +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { // Save request URL. requestURL := req.URL // Get encoded URL path. - buf.WriteString(encodeURL2Path(&req)) + buf.WriteString(encodeURL2Path(&req, virtualHost)) if requestURL.RawQuery != "" { var n int vals, _ := url.ParseQuery(requestURL.RawQuery) diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go index c21a76d79..49d6dcdf5 100644 --- a/vendor/github.com/minio/minio-go/retry.go +++ b/vendor/github.com/minio/minio-go/retry.go @@ -111,6 +111,9 @@ func isNetErrorRetryable(err error) bool { } else if strings.Contains(err.Error(), "connection timed out") { // If err is a net.Dial timeout, retry. return true + } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { + // If error is transport connection broken, retry. 
+ return true } } } diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go index e2dafe172..88700cfe7 100644 --- a/vendor/github.com/minio/minio-go/transport.go +++ b/vendor/github.com/minio/minio-go/transport.go @@ -2,7 +2,7 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * Copyright 2017-2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,9 +25,10 @@ import ( "time" ) -// This default transport is similar to http.DefaultTransport -// but with additional DisableCompression: -var defaultMinioTransport http.RoundTripper = &http.Transport{ +// DefaultTransport - this default transport is similar to +// http.DefaultTransport but with additional param DisableCompression +// is set to true to avoid decompressing content with 'gzip' encoding. +var DefaultTransport http.RoundTripper = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, @@ -35,6 +36,7 @@ var defaultMinioTransport http.RoundTripper = &http.Transport{ DualStack: true, }).DialContext, MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 0f92546d3..2f02ac89f 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -221,16 +221,10 @@ var supportedHeaders = []string{ "cache-control", "content-encoding", "content-disposition", + "content-language", // Add more supported headers here. } -// cseHeaders is list of client side encryption headers -var cseHeaders = []string{ - "X-Amz-Iv", - "X-Amz-Key", - "X-Amz-Matdesc", -} - // isStorageClassHeader returns true if the header is a supported storage class header func isStorageClassHeader(headerKey string) bool { return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey) @@ -247,19 +241,6 @@ func isStandardHeader(headerKey string) bool { return false } -// isCSEHeader returns true if header is a client side encryption header. -func isCSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range cseHeaders { - header := strings.ToLower(h) - if (header == key) || - (("x-amz-meta-" + header) == key) { - return true - } - } - return false -} - // sseHeaders is list of server side encryption headers var sseHeaders = []string{ "x-amz-server-side-encryption", -- cgit v1.2.3-1-g7c22
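
Taken together, the hunks above change three caller-visible things in 6.0.0: the pkg/policy helpers (appendStatement, GetPolicy, GetPolicies, SetPolicy, resourceMatch) are deleted outright, the V2 signer entry points (SignV2, PreSignV2) grow a trailing virtualHost flag that feeds encodeURL2Path, and the package-private defaultMinioTransport is exported as DefaultTransport. A minimal migration sketch follows. It is not part of the upstream patch: the endpoint, credentials, and bucket/object names are placeholders, and the string-based SetBucketPolicy signature and the SetCustomTransport method are assumed from the 6.0.0 API rather than shown in these hunks.

    package main

    import (
    	"log"
    	"net/http"

    	minio "github.com/minio/minio-go"
    	"github.com/minio/minio-go/pkg/s3signer"
    )

    func main() {
    	// Hypothetical endpoint and credentials, for illustration only.
    	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    	if err != nil {
    		log.Fatalln(err)
    	}

    	// pkg/policy is gone in 6.0.0; bucket policies are passed as raw
    	// JSON policy documents instead of BucketPolicy constants (the
    	// string-based signature is assumed from the 6.0.0 API, since the
    	// hunks here only show the old package being removed).
    	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
    		`"Principal":{"AWS":["*"]},"Action":["s3:GetObject"],` +
    		`"Resource":["arn:aws:s3:::mybucket/*"]}]}`
    	if err = client.SetBucketPolicy("mybucket", policy); err != nil {
    		log.Fatalln(err)
    	}

    	// SignV2/PreSignV2 now take a virtualHost flag. With true,
    	// encodeURL2Path treats everything before the first '.' of the
    	// Host header as the bucket name; with false, only the URL path
    	// is encoded and signed.
    	req, err := http.NewRequest(http.MethodGet,
    		"https://mybucket.play.minio.io:9000/myobject", nil)
    	if err != nil {
    		log.Fatalln(err)
    	}
    	signed := s3signer.SignV2(*req, "ACCESS-KEY", "SECRET-KEY", true)
    	log.Println(signed.Header.Get("Authorization"))

    	// defaultMinioTransport is exported as DefaultTransport, so it can
    	// be reused or wrapped (SetCustomTransport assumed present, as in
    	// earlier releases).
    	client.SetCustomTransport(minio.DefaultTransport)
    }

Note that the exported DefaultTransport also raises MaxIdleConnsPerHost to 100, and the retry.go hunk makes "net/http: HTTP/1.x transport connection broken" errors retryable, so long-lived clients built on this transport should see fewer spurious failures without any code change.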