author     Jesús Espino <jespinog@gmail.com>           2018-04-30 23:35:10 +0200
committer  Christopher Speller <crspeller@gmail.com>   2018-04-30 14:35:10 -0700
commit     baba8fa92f47cad604f6d2a2714d09f89178fbff (patch)
tree       0b751ac64e9dd13fd544e37a6bf11c2f90ae9f36 /vendor
parent     a5f006b8a94bdadf5343aacd2b58c5bad4485153 (diff)
Upgrading minio-go library to 6.0.0 (#8651)
* Upgrading minio-go library to 6.0.0
* Removing unnecessary Gopkg constraint
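For API consumers, the breaking changes in 6.0.0 are that client-side encryption (encrypt.Materials) gives way to server-side encryption options, and bucket policies become raw JSON strings. A minimal sketch of the new call shapes, assuming the new encrypt.NewSSEC constructor from pkg/encrypt/server-side.go; endpoint, credentials, and bucket names are placeholders:

package main

import (
	"log"
	"strings"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// encrypt.Materials is gone; encryption is now requested server-side.
	key := []byte("32-byte-long-SSE-C-key-goes-here") // exactly 32 bytes
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = c.PutObject("mybucket", "myobject",
		strings.NewReader("hello"), 5,
		minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
		log.Fatal(err)
	}

	// Bucket policies are now raw JSON strings, not policy.BucketPolicy values.
	policyJSON, err := c.GetBucketPolicy("mybucket")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("policy:", policyJSON)
}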
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/minio/minio-go/Makefile | 4
-rw-r--r--  vendor/github.com/minio/minio-go/README.md | 5
-rw-r--r--  vendor/github.com/minio/minio-go/README_zh_CN.md | 1
-rw-r--r--  vendor/github.com/minio/minio-go/api-compose-object.go | 117
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object-file.go | 13
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object.go | 37
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-options.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-policy.go | 53
-rw-r--r--  vendor/github.com/minio/minio-go/api-notification.go | 12
-rw-r--r--  vendor/github.com/minio/minio-go/api-presigned.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket.go | 58
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-context.go | 6
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-copy.go | 36
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-encrypted.go | 44
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-streaming.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object.go | 37
-rw-r--r--  vendor/github.com/minio/minio-go/api-remove.go | 21
-rw-r--r--  vendor/github.com/minio/minio-go/api-stat.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/api.go | 29
-rw-r--r--  vendor/github.com/minio/minio-go/appveyor.yml | 1
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-notification.go | 69
-rw-r--r--  vendor/github.com/minio/minio-go/constants.go | 7
-rw-r--r--  vendor/github.com/minio/minio-go/core.go | 16
-rw-r--r--  vendor/github.com/minio/minio-go/functional_tests.go | 1499
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go | 294
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/interface.go | 54
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/keys.go | 166
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go | 195
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go | 116
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go | 635
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go | 47
-rw-r--r--  vendor/github.com/minio/minio-go/retry.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/transport.go | 10
-rw-r--r--  vendor/github.com/minio/minio-go/utils.go | 21
-rw-r--r--  vendor/golang.org/x/crypto/argon2/argon2.go | 285
-rw-r--r--  vendor/golang.org/x/crypto/argon2/blake2b.go | 53
-rw-r--r--  vendor/golang.org/x/crypto/argon2/blamka_amd64.go | 61
-rw-r--r--  vendor/golang.org/x/crypto/argon2/blamka_amd64.s | 252
-rw-r--r--  vendor/golang.org/x/crypto/argon2/blamka_generic.go | 163
-rw-r--r--  vendor/golang.org/x/crypto/argon2/blamka_ref.go | 15
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2b.go | 289
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go | 45
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s | 750
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go | 25
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s | 290
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2b_generic.go | 179
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2b_ref.go | 11
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/blake2x.go | 177
-rw-r--r--  vendor/golang.org/x/crypto/blake2b/register.go | 32
51 files changed, 4106 insertions, 2166 deletions
diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile
index 05081c723..bad81ffaf 100644
--- a/vendor/github.com/minio/minio-go/Makefile
+++ b/vendor/github.com/minio/minio-go/Makefile
@@ -3,10 +3,10 @@ all: checks
checks:
@go get -t ./...
@go vet ./...
- @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
@go get github.com/dustin/go-humanize/...
@go get github.com/sirupsen/logrus/...
- @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
@go get -u github.com/a8m/mark/...
@go get -u github.com/minio/cli/...
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index 2dedc1a28..91b42049f 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -130,7 +130,6 @@ The full API Reference is available here.
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
-* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
@@ -156,10 +155,6 @@ The full API Reference is available here.
* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
-### API Reference: Encrypted Object Operations
-* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
-* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
-
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
diff --git a/vendor/github.com/minio/minio-go/README_zh_CN.md b/vendor/github.com/minio/minio-go/README_zh_CN.md
index 5584f4255..a5acf199e 100644
--- a/vendor/github.com/minio/minio-go/README_zh_CN.md
+++ b/vendor/github.com/minio/minio-go/README_zh_CN.md
@@ -141,7 +141,6 @@ mc ls play/mymusic/
### API文档 : 存储桶策略
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
-* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
### API文档 : 存储桶通知
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
index 88b60d604..99b2adae8 100644
--- a/vendor/github.com/minio/minio-go/api-compose-object.go
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -19,7 +19,6 @@ package minio
import (
"context"
- "encoding/base64"
"fmt"
"net/http"
"net/url"
@@ -27,58 +26,15 @@ import (
"strings"
"time"
+ "github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
-// SSEInfo - represents Server-Side-Encryption parameters specified by
-// a user.
-type SSEInfo struct {
- key []byte
- algo string
-}
-
-// NewSSEInfo - specifies (binary or un-encoded) encryption key and
-// algorithm name. If algo is empty, it defaults to "AES256". Ref:
-// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
-func NewSSEInfo(key []byte, algo string) SSEInfo {
- if algo == "" {
- algo = "AES256"
- }
- return SSEInfo{key, algo}
-}
-
-// internal method that computes SSE-C headers
-func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
- if s == nil {
- return nil
- }
-
- cs := ""
- if isCopySource {
- cs = "copy-source-"
- }
- return map[string]string{
- "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
- "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
- "x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key),
- }
-}
-
-// GetSSEHeaders - computes and returns headers for SSE-C as key-value
-// pairs. They can be set as metadata in PutObject* requests (for
-// encryption) or be set as request headers in `Core.GetObject` (for
-// decryption).
-func (s *SSEInfo) GetSSEHeaders() map[string]string {
- return s.getSSEHeaders(false)
-}
-
// DestinationInfo - type with information about the object to be
// created via server-side copy requests, using the Compose API.
type DestinationInfo struct {
bucket, object string
-
- // key for encrypting destination
- encryption *SSEInfo
+ encryption encrypt.ServerSide
// if no user-metadata is provided, it is copied from source
// (when there is only once source object in the compose
@@ -97,9 +53,7 @@ type DestinationInfo struct {
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
-func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
- userMeta map[string]string) (d DestinationInfo, err error) {
-
+func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucket); err != nil {
return d, err
@@ -125,7 +79,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
return DestinationInfo{
bucket: bucket,
object: object,
- encryption: encryptSSEC,
+ encryption: sse,
userMetadata: m,
}, nil
}
@@ -154,10 +108,8 @@ func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) ma
// server-side copying APIs.
type SourceInfo struct {
bucket, object string
-
- start, end int64
-
- decryptKey *SSEInfo
+ start, end int64
+ encryption encrypt.ServerSide
// Headers to send with the upload-part-copy request involving
// this source object.
Headers http.Header
@@ -169,23 +121,17 @@ type SourceInfo struct {
// `decryptSSEC` is the decryption key using server-side-encryption
// with customer provided key. It may be nil if the source is not
// encrypted.
-func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
+func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo {
r := SourceInfo{
bucket: bucket,
object: object,
start: -1, // range is unspecified by default
- decryptKey: decryptSSEC,
+ encryption: sse,
Headers: make(http.Header),
}
// Set the source header
r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
-
- // Assemble decryption headers for upload-part-copy request
- for k, v := range decryptSSEC.getSSEHeaders(true) {
- r.Headers.Set(k, v)
- }
-
return r
}
@@ -245,10 +191,7 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
// Get object info - need size and etag here. Also, decryption
// headers are added to the stat request if given.
var objInfo ObjectInfo
- opts := StatObjectOptions{}
- for k, v := range s.decryptKey.getSSEHeaders(false) {
- opts.Set(k, v)
- }
+ opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}}
objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
if err != nil {
err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
@@ -478,37 +421,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
// involved, it is being copied wholly and at most 5GiB in
// size, emptyfiles are also supported).
if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
- h := srcs[0].Headers
- // Add destination encryption headers
- for k, v := range dst.encryption.getSSEHeaders(false) {
- h.Set(k, v)
- }
-
- // If no user metadata is specified (and so, the
- // for-loop below is not entered), metadata from the
- // source is copied to the destination (due to
- // single-part copy-object PUT request behaviour).
- for k, v := range dst.getUserMetaHeadersMap(true) {
- h.Set(k, v)
- }
-
- // Send copy request
- resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
- bucketName: dst.bucket,
- objectName: dst.object,
- customHeader: h,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- // Check if we got an error response.
- if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, dst.bucket, dst.object)
- }
-
- // Return nil on success.
- return nil
+ return c.CopyObject(dst, srcs[0])
}
// Now, handle multipart-copy cases.
@@ -527,7 +440,8 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
for k, v := range metaMap {
metaHeaders[k] = v
}
- uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders})
+
+ uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders})
if err != nil {
return err
}
@@ -537,9 +451,12 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
partIndex := 1
for i, src := range srcs {
h := src.Headers
+ if src.encryption != nil {
+ src.encryption.Marshal(h)
+ }
// Add destination encryption headers
- for k, v := range dst.encryption.getSSEHeaders(false) {
- h.Set(k, v)
+ if dst.encryption != nil {
+ dst.encryption.Marshal(h)
}
// calculate start/end indices of parts after
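The SSE plumbing above replaces SSEInfo entirely. A sketch of a compose call under the new API, assuming a configured *minio.Client and the encrypt.NewSSEC constructor from pkg/encrypt/server-side.go; bucket and object names are placeholders:

package example

import (
	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// concatEncrypted stitches two SSE-C encrypted sources into one destination
// object; the same customer key decrypts the sources and re-encrypts the result.
func concatEncrypted(c *minio.Client, key []byte) error {
	sse, err := encrypt.NewSSEC(key) // key must be exactly 32 bytes
	if err != nil {
		return err
	}
	srcs := []minio.SourceInfo{
		minio.NewSourceInfo("srcbucket", "part-1", sse),
		minio.NewSourceInfo("srcbucket", "part-2", sse),
	}
	dst, err := minio.NewDestinationInfo("dstbucket", "joined", sse, nil)
	if err != nil {
		return err
	}
	return c.ComposeObject(dst, srcs)
}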
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
index 2b58220a6..a852220a2 100644
--- a/vendor/github.com/minio/minio-go/api-get-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -18,14 +18,11 @@
package minio
import (
+ "context"
"io"
"os"
"path/filepath"
- "github.com/minio/minio-go/pkg/encrypt"
-
- "context"
-
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -40,14 +37,6 @@ func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObje
return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
}
-// FGetEncryptedObject - Decrypt and store an object at filePath.
-func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error {
- if materials == nil {
- return ErrInvalidArgument("Unable to recognize empty encryption properties")
- }
- return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials})
-}
-
// fGetObjectWithContext - fgetObject wrapper function with context
func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
// Input validation.
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index 50bbc2201..0bf556ec6 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -27,20 +27,9 @@ import (
"sync"
"time"
- "github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
-// GetEncryptedObject deciphers and streams data stored in the server after applying a specified encryption materials,
-// returned stream should be closed by the caller.
-func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
- if encryptMaterials == nil {
- return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
- }
-
- return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials})
-}
-
// GetObject - returns a seekable, readable object.
func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
@@ -127,6 +116,9 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName
} else {
// First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
+
+ // Remove range header if already set, for stat Operations to get original file size.
+ delete(opts.headers, "Range")
objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
if err != nil {
resCh <- getResponse{
@@ -142,6 +134,8 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName
}
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
+ // Remove range header if already set, for stat Operations to get original file size.
+ delete(opts.headers, "Range")
if etag != "" {
opts.SetMatchETag(etag)
}
@@ -381,13 +375,11 @@ func (o *Object) Stat() (ObjectInfo, error) {
// This is the first request.
if !o.isStarted || !o.objectInfoSet {
- statReq := getRequest{
+ // Send the request and get the response.
+ _, err := o.doGetRequest(getRequest{
isFirstReq: !o.isStarted,
settingObjectInfo: !o.objectInfoSet,
- }
-
- // Send the request and get the response.
- _, err := o.doGetRequest(statReq)
+ })
if err != nil {
o.prevErr = err
return ObjectInfo{}, err
@@ -493,7 +485,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
// Negative offset is valid for whence of '2'.
if offset < 0 && whence != 2 {
- return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
+ return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
}
// This is the first request. So before anything else
@@ -662,15 +654,6 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op
Metadata: extractObjMetadata(resp.Header),
}
- reader := resp.Body
- if opts.Materials != nil {
- err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey))
- if err != nil {
- return nil, ObjectInfo{}, err
- }
- reader = opts.Materials
- }
-
// do not close body here, caller will close
- return reader, objectStat, nil
+ return resp.Body, objectStat, nil
}
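With GetEncryptedObject removed, decryption of SSE-C objects goes through GetObjectOptions instead. A sketch, assuming a configured client and placeholder names:

package example

import (
	"io"
	"os"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// readEncrypted streams an SSE-C object; the key travels as request headers
// via GetObjectOptions.ServerSideEncryption rather than encrypt.Materials.
func readEncrypted(c *minio.Client, key []byte) error {
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		return err
	}
	obj, err := c.GetObject("mybucket", "secret.txt",
		minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		return err
	}
	defer obj.Close()
	_, err = io.Copy(os.Stdout, obj)
	return err
}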
diff --git a/vendor/github.com/minio/minio-go/api-get-options.go b/vendor/github.com/minio/minio-go/api-get-options.go
index dd70415cd..a5a87526f 100644
--- a/vendor/github.com/minio/minio-go/api-get-options.go
+++ b/vendor/github.com/minio/minio-go/api-get-options.go
@@ -28,9 +28,8 @@ import (
// GetObjectOptions are used to specify additional headers or options
// during GET requests.
type GetObjectOptions struct {
- headers map[string]string
-
- Materials encrypt.Materials
+ headers map[string]string
+ ServerSideEncryption encrypt.ServerSide
}
// StatObjectOptions are used to specify additional headers or options
@@ -45,6 +44,9 @@ func (o GetObjectOptions) Header() http.Header {
for k, v := range o.headers {
headers.Set(k, v)
}
+ if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() != encrypt.S3 {
+ o.ServerSideEncryption.Marshal(headers)
+ }
return headers
}
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
index a4259c9d7..12d4c590e 100644
--- a/vendor/github.com/minio/minio-go/api-get-policy.go
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -19,62 +19,32 @@ package minio
import (
"context"
- "encoding/json"
"io/ioutil"
"net/http"
"net/url"
- "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketPolicy - get bucket policy at a given path.
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
+func (c Client) GetBucketPolicy(bucketName string) (string, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
- return policy.BucketPolicyNone, err
+ return "", err
}
- if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
- return policy.BucketPolicyNone, err
- }
- policyInfo, err := c.getBucketPolicy(bucketName)
- if err != nil {
- errResponse := ToErrorResponse(err)
- if errResponse.Code == "NoSuchBucketPolicy" {
- return policy.BucketPolicyNone, nil
- }
- return policy.BucketPolicyNone, err
- }
- return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
-}
-
-// ListBucketPolicies - list all policies for a given prefix and all its children.
-func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
- // Input validation.
- if err := s3utils.CheckValidBucketName(bucketName); err != nil {
- return map[string]policy.BucketPolicy{}, err
- }
- if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
- return map[string]policy.BucketPolicy{}, err
- }
- policyInfo, err := c.getBucketPolicy(bucketName)
+ bucketPolicy, err := c.getBucketPolicy(bucketName)
if err != nil {
errResponse := ToErrorResponse(err)
if errResponse.Code == "NoSuchBucketPolicy" {
- return map[string]policy.BucketPolicy{}, nil
+ return "", nil
}
- return map[string]policy.BucketPolicy{}, err
+ return "", err
}
- return policy.GetPolicies(policyInfo.Statements, bucketName), nil
-}
-
-// Default empty bucket access policy.
-var emptyBucketAccessPolicy = policy.BucketAccessPolicy{
- Version: "2012-10-17",
+ return bucketPolicy, nil
}
// Request server for current bucket policy.
-func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) {
+func (c Client) getBucketPolicy(bucketName string) (string, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@@ -89,21 +59,20 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e
defer closeResponse(resp)
if err != nil {
- return emptyBucketAccessPolicy, err
+ return "", err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "")
+ return "", httpRespToErrorResponse(resp, bucketName, "")
}
}
bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return emptyBucketAccessPolicy, err
+ return "", err
}
- policy := policy.BucketAccessPolicy{}
- err = json.Unmarshal(bucketPolicyBuf, &policy)
+ policy := string(bucketPolicyBuf)
return policy, err
}
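GetBucketPolicy now hands back the policy document verbatim. A sketch of the new empty-string convention (no policy set yields "", not an error), assuming a configured client:

package example

import (
	"fmt"

	"github.com/minio/minio-go"
)

// showPolicy prints the raw JSON policy; an empty string means the bucket
// has no policy, mirroring the NoSuchBucketPolicy handling above.
func showPolicy(c *minio.Client, bucket string) error {
	policyJSON, err := c.GetBucketPolicy(bucket)
	if err != nil {
		return err
	}
	if policyJSON == "" {
		fmt.Println("no bucket policy set")
		return nil
	}
	fmt.Println(policyJSON)
	return nil
}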
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
index 578fdea8e..1c01e362b 100644
--- a/vendor/github.com/minio/minio-go/api-notification.go
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -205,13 +205,11 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
continue
}
- // Send notifications on channel only if there are events received.
- if len(notificationInfo.Records) > 0 {
- select {
- case notificationInfoCh <- notificationInfo:
- case <-doneCh:
- return
- }
+ // Send notificationInfo
+ select {
+ case notificationInfoCh <- notificationInfo:
+ case <-doneCh:
+ return
}
}
// Look for any underlying errors.
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index 8b0258948..a2c060786 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -119,7 +119,9 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
- u, err = c.makeTargetURL(bucketName, "", location, nil)
+ isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
+
+ u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index bb583a78f..8920ac742 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -20,13 +20,12 @@ package minio
import (
"bytes"
"context"
- "encoding/json"
"encoding/xml"
- "fmt"
+ "io/ioutil"
"net/http"
"net/url"
+ "strings"
- "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -101,73 +100,40 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
// SetBucketPolicy set the access permissions on an existing bucket.
//
-// For example
-//
-// none - owner gets full access [default].
-// readonly - anonymous get access for everyone at a given object prefix.
-// readwrite - anonymous list/put/delete access to a given object prefix.
-// writeonly - anonymous put/delete access to a given object prefix.
-func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
+func (c Client) SetBucketPolicy(bucketName, policy string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
- return err
- }
-
- if !bucketPolicy.IsValidBucketPolicy() {
- return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
- }
-
- policyInfo, err := c.getBucketPolicy(bucketName)
- errResponse := ToErrorResponse(err)
- if err != nil && errResponse.Code != "NoSuchBucketPolicy" {
- return err
- }
-
- if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
- // As the request is for removing policy and the bucket
- // has empty policy statements, just return success.
- return nil
- }
-
- policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)
// Save the updated policies.
- return c.putBucketPolicy(bucketName, policyInfo)
+ return c.putBucketPolicy(bucketName, policy)
}
// Saves a new bucket policy.
-func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
+func (c Client) putBucketPolicy(bucketName, policy string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- // If there are no policy statements, we should remove entire policy.
- if len(policyInfo.Statements) == 0 {
- return c.removeBucketPolicy(bucketName)
- }
-
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("policy", "")
- policyBytes, err := json.Marshal(&policyInfo)
+ // Content-length is mandatory for put policy request
+ policyReader := strings.NewReader(policy)
+ b, err := ioutil.ReadAll(policyReader)
if err != nil {
return err
}
- policyBuffer := bytes.NewReader(policyBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: policyBuffer,
- contentLength: int64(len(policyBytes)),
- contentMD5Base64: sumMD5Base64(policyBytes),
- contentSHA256Hex: sum256Hex(policyBytes),
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: policyReader,
+ contentLength: int64(len(b)),
}
// Execute PUT to upload a new bucket policy.
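Callers now author the policy JSON themselves instead of passing a policy.BucketPolicy constant. A sketch granting anonymous read access, assuming a configured client; "mybucket" is a placeholder:

package example

import (
	"github.com/minio/minio-go"
)

// setReadOnly applies a download-only policy by supplying the full JSON
// document, which putBucketPolicy now sends as-is with a Content-Length.
func setReadOnly(c *minio.Client) error {
	policyJSON := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": ["*"]},
	    "Action": ["s3:GetObject"],
	    "Resource": ["arn:aws:s3:::mybucket/*"]
	  }]
	}`
	return c.SetBucketPolicy("mybucket", policyJSON)
}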
diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go
index a6f23dcaa..ff4663e2f 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-context.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-context.go
@@ -29,11 +29,5 @@ func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName
if err != nil {
return 0, err
}
- if opts.EncryptMaterials != nil {
- if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil {
- return 0, err
- }
- return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts)
- }
return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index 8032009dc..acd195fcd 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -17,7 +17,41 @@
package minio
+import (
+ "context"
+ "net/http"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
// CopyObject - copy a source object into a new object
func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
- return c.ComposeObject(dst, []SourceInfo{src})
+ header := make(http.Header)
+ for k, v := range src.Headers {
+ header[k] = v
+ }
+ if src.encryption != nil {
+ encrypt.SSECopy(src.encryption).Marshal(header)
+ }
+ if dst.encryption != nil {
+ dst.encryption.Marshal(header)
+ }
+ for k, v := range dst.getUserMetaHeadersMap(true) {
+ header.Set(k, v)
+ }
+
+ resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{
+ bucketName: dst.bucket,
+ objectName: dst.object,
+ customHeader: header,
+ })
+ if err != nil {
+ return err
+ }
+ defer closeResponse(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, dst.bucket, dst.object)
+ }
+ return nil
}
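CopyObject is now a direct single-request copy (and ComposeObject's single-source fast path delegates to it). A sketch of copying between two SSE-C keys, assuming a configured client and encrypt.NewSSEC; encrypt.SSECopy marshals the source key as x-amz-copy-source-* headers:

package example

import (
	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// reEncryptCopy server-side copies an object, decrypting with srcKey and
// re-encrypting with dstKey (both 32-byte SSE-C customer keys).
func reEncryptCopy(c *minio.Client, srcKey, dstKey []byte) error {
	srcSSE, err := encrypt.NewSSEC(srcKey)
	if err != nil {
		return err
	}
	dstSSE, err := encrypt.NewSSEC(dstKey)
	if err != nil {
		return err
	}
	src := minio.NewSourceInfo("srcbucket", "original", srcSSE)
	dst, err := minio.NewDestinationInfo("dstbucket", "copy", dstSSE, nil)
	if err != nil {
		return err
	}
	return c.CopyObject(dst, src)
}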
diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
deleted file mode 100644
index 87dd1ab1a..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "context"
- "io"
-
- "github.com/minio/minio-go/pkg/encrypt"
-)
-
-// PutEncryptedObject - Encrypt and store object.
-func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) {
-
- if encryptMaterials == nil {
- return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
- }
-
- if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
- return 0, err
- }
-
- return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials})
-}
-
-// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath.
-func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) {
- return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials})
-}
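The deleted helpers above have direct replacements: encryption moves into PutObjectOptions. A sketch of the file-upload case, assuming a configured client; bucket, object, and file paths are placeholders:

package example

import (
	"context"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// uploadFileEncrypted stands in for the removed FPutEncryptedObject: the
// server encrypts the object with a customer-provided key (SSE-C).
func uploadFileEncrypted(c *minio.Client, key []byte) error {
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		return err
	}
	_, err = c.FPutObjectWithContext(context.Background(), "mybucket",
		"backup.tar.gz", "/tmp/backup.tar.gz",
		minio.PutObjectOptions{ServerSideEncryption: sse})
	return err
}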
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index 5262e8b91..52dc069d0 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -33,6 +33,7 @@ import (
"strconv"
"strings"
+ "github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -138,7 +139,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
- md5Base64, sha256Hex, int64(length), opts.UserMetadata)
+ md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption)
if err != nil {
return totalUploadedSize, err
}
@@ -226,11 +227,9 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN
return initiateMultipartUploadResult, nil
}
-const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
-
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
- partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) {
+ partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
@@ -260,13 +259,9 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID
// Set encryption headers, if any.
customHeader := make(http.Header)
- // for k, v := range metadata {
- // if len(v) > 0 {
- // if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
- // customHeader.Set(k, v)
- // }
- // }
- // }
+ if sse != nil && sse.Type() != encrypt.S3 && sse.Type() != encrypt.KMS {
+ sse.Marshal(customHeader)
+ }
reqMetadata := requestMetadata{
bucketName: bucketName,
diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
index be1dc57ef..211d1c23c 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
@@ -167,7 +167,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa
var objPart ObjectPart
objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
sectionReader, uploadReq.PartNum,
- "", "", partSize, opts.UserMetadata)
+ "", "", partSize, opts.ServerSideEncryption)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
@@ -280,7 +280,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa
var objPart ObjectPart
objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize),
- partNumber, "", "", partSize, opts.UserMetadata)
+ partNumber, "", "", partSize, opts.ServerSideEncryption)
if err != nil {
return totalUploadedSize, err
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index ca4052225..2402a7167 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -33,15 +33,16 @@ import (
// PutObjectOptions represents options specified by user for PutObject call
type PutObjectOptions struct {
- UserMetadata map[string]string
- Progress io.Reader
- ContentType string
- ContentEncoding string
- ContentDisposition string
- CacheControl string
- EncryptMaterials encrypt.Materials
- NumThreads uint
- StorageClass string
+ UserMetadata map[string]string
+ Progress io.Reader
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ ContentLanguage string
+ CacheControl string
+ ServerSideEncryption encrypt.ServerSide
+ NumThreads uint
+ StorageClass string
}
// getNumThreads - gets the number of threads to be used in the multipart
@@ -71,19 +72,20 @@ func (opts PutObjectOptions) Header() (header http.Header) {
if opts.ContentDisposition != "" {
header["Content-Disposition"] = []string{opts.ContentDisposition}
}
+ if opts.ContentLanguage != "" {
+ header["Content-Language"] = []string{opts.ContentLanguage}
+ }
if opts.CacheControl != "" {
header["Cache-Control"] = []string{opts.CacheControl}
}
- if opts.EncryptMaterials != nil {
- header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()}
- header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()}
- header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()}
+ if opts.ServerSideEncryption != nil {
+ opts.ServerSideEncryption.Marshal(header)
}
if opts.StorageClass != "" {
header[amzStorageClass] = []string{opts.StorageClass}
}
for k, v := range opts.UserMetadata {
- if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) && !isStorageClassHeader(k) {
+ if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) {
header["X-Amz-Meta-"+k] = []string{v}
} else {
header[k] = []string{v}
@@ -92,11 +94,10 @@ func (opts PutObjectOptions) Header() (header http.Header) {
return
}
-// validate() checks if the UserMetadata map has standard headers or client side
-// encryption headers and raises an error if so.
+// validate() checks if the UserMetadata map has standard headers or server-side encryption headers and raises an error if so.
func (opts PutObjectOptions) validate() (err error) {
for k, v := range opts.UserMetadata {
- if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isCSEHeader(k) || isStorageClassHeader(k) {
+ if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
return ErrInvalidArgument(k + " unsupported user defined metadata name")
}
if !httplex.ValidHeaderFieldValue(v) {
@@ -217,7 +218,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
- "", "", int64(length), opts.UserMetadata)
+ "", "", int64(length), opts.ServerSideEncryption)
if err != nil {
return totalUploadedSize, err
}
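ContentLanguage is new in this release and is exercised by the functional test further down. A sketch, assuming a configured client and placeholder names:

package example

import (
	"bytes"

	"github.com/minio/minio-go"
)

// putWithLanguage sets the Content-Language header on upload; StatObject
// should report it back in the object metadata.
func putWithLanguage(c *minio.Client, data []byte) error {
	_, err := c.PutObject("mybucket", "greeting.txt", bytes.NewReader(data),
		int64(len(data)), minio.PutObjectOptions{
			ContentType:     "text/plain",
			ContentLanguage: "en-US",
		})
	return err
}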
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index f14b2eb7f..c2ffcdd34 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -129,10 +129,8 @@ func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh
}
}
-// RemoveObjects remove multiples objects from a bucket.
-// The list of objects to remove are received from objectsCh.
-// Remove failures are sent back via error channel.
-func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation.
+func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
errorCh := make(chan RemoveObjectError, 1)
// Validate if bucket name is valid.
@@ -189,7 +187,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
// Generate remove multi objects XML request
removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
+ resp, err := c.executeMethod(ctx, "POST", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: bytes.NewReader(removeBytes),
@@ -197,6 +195,12 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
contentMD5Base64: sumMD5Base64(removeBytes),
contentSHA256Hex: sum256Hex(removeBytes),
})
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ e := httpRespToErrorResponse(resp, bucketName, "")
+ errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+ }
+ }
if err != nil {
for _, b := range batch {
errorCh <- RemoveObjectError{ObjectName: b, Err: err}
@@ -213,6 +217,13 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
return errorCh
}
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove are received from objectsCh.
+// Remove failures are sent back via error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+ return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh)
+}
+
// RemoveIncompleteUpload aborts a partially uploaded object.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Input validation.
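A sketch of the new context-aware variant, assuming a configured client; cancelling the context aborts the batched multi-object DELETE requests:

package example

import (
	"context"
	"log"
	"time"

	"github.com/minio/minio-go"
)

// removeWithDeadline deletes the named objects, giving up after 30 seconds.
func removeWithDeadline(c *minio.Client, names []string) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range names {
			objectsCh <- name
		}
	}()

	// Failures (including batch-level HTTP errors, per the change above)
	// arrive on the returned error channel.
	for rErr := range c.RemoveObjectsWithContext(ctx, "mybucket", objectsCh) {
		log.Printf("failed to remove %q: %v", rErr.ObjectName, rErr.Err)
	}
}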
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index 8904dd678..5356f8a4f 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -115,7 +115,7 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o
return ObjectInfo{}, err
}
if resp != nil {
- if resp.StatusCode != http.StatusOK {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index fa8595bcd..daf3ec2c2 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
+ * Copyright 2015-2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -99,7 +99,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "4.0.7"
+ libraryVersion = "6.0.0"
)
// User Agent should always follow the below style.
@@ -258,8 +258,7 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
}
switch {
case signerType.IsV2():
- // Add signature version '2' authorization header.
- req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ return errors.New("signature V2 cannot support redirection")
case signerType.IsV4():
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
}
@@ -288,7 +287,7 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
- Transport: defaultMinioTransport,
+ Transport: DefaultTransport,
CheckRedirect: clnt.redirectHeaders,
}
@@ -338,7 +337,7 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// TLSClientConfig: &tls.Config{RootCAs: pool},
// DisableCompression: true,
// }
- // api.SetTransport(tr)
+ // api.SetCustomTransport(tr)
//
if c.httpClient != nil {
c.httpClient.Transport = customHTTPTransport
@@ -694,8 +693,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
}
+ // Look if target url supports virtual host.
+ isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName)
+
// Construct a new target URL.
- targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues)
if err != nil {
return nil, err
}
@@ -737,7 +739,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
if signerType.IsV2() {
// Presign URL with signature v2.
- req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires)
+ req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
} else if signerType.IsV4() {
// Presign URL with signature v4.
req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
@@ -783,7 +785,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
switch {
case signerType.IsV2():
// Add signature version '2' authorization header.
- req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
// Streaming signature is used by default for a PUT object request. Additionally we also
// look if the initialized client is secure, if yes then we don't need to perform
@@ -815,7 +817,7 @@ func (c Client) setUserAgent(req *http.Request) {
}
// makeTargetURL make a new target url.
-func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
+func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
@@ -854,8 +856,6 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
- // Save if target url will have buckets which suppport virtual host.
- isVirtualHostStyle := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
// virtual host style.
@@ -883,12 +883,17 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
// returns true if virtual hosted style requests are to be used.
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
+ if bucketName == "" {
+ return false
+ }
+
if c.lookup == BucketLookupDNS {
return true
}
if c.lookup == BucketLookupPath {
return false
}
+
// default to virtual only for Amazon/Google storage. In all other cases use
// path style requests
return s3utils.IsVirtualHostSupported(url, bucketName)
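The virtual-host decision above is now computed once per request and honoured by presigned and post-policy URLs as well. A sketch of pinning the lookup style at client construction, assuming the NewWithOptions constructor and static credentials; endpoint and keys are placeholders:

package example

import (
	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

// pathStyleClient forces path-style URLs (bucket in the path, not the host),
// overriding the default of virtual-host style for S3/GCS endpoints.
func pathStyleClient() (*minio.Client, error) {
	return minio.NewWithOptions("s3.amazonaws.com", &minio.Options{
		Creds:        credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:       true,
		BucketLookup: minio.BucketLookupPath,
	})
}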
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
index b93b4d45d..aa9f840e5 100644
--- a/vendor/github.com/minio/minio-go/appveyor.yml
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -19,6 +19,7 @@ install:
- go get -u github.com/golang/lint/golint
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
+ - go get -u golang.org/x/crypto/argon2
- go get -t ./...
# to run your custom scripts instead of automatic MSBuild
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 5d56cdf42..cac7ad792 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -203,7 +203,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
}
if signerType.IsV2() {
- req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ // Get Bucket Location calls should be always path style
+ isVirtualHost := false
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
return req, nil
}
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
index 1b9d6a0c7..ea303dd9d 100644
--- a/vendor/github.com/minio/minio-go/bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -19,7 +19,8 @@ package minio
import (
"encoding/xml"
- "reflect"
+
+ "github.com/minio/minio-go/pkg/set"
)
// NotificationEventType is a S3 notification event associated to the bucket notification configuration
@@ -96,7 +97,7 @@ type NotificationConfig struct {
// NewNotificationConfig creates one notification config and sets the given ARN
func NewNotificationConfig(arn Arn) NotificationConfig {
- return NotificationConfig{Arn: arn}
+ return NotificationConfig{Arn: arn, Filter: &Filter{}}
}
// AddEvents adds one event to the current notification config
@@ -163,39 +164,79 @@ type BucketNotification struct {
}
// AddTopic adds a given topic config to the general bucket notification config
-func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
+func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool {
newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
for _, n := range b.TopicConfigs {
- if reflect.DeepEqual(n, newTopicConfig) {
- // Avoid adding duplicated entry
- return
+ // If new config matches existing one
+ if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range topicConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
}
}
b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+ return true
}
// AddQueue adds a given queue config to the general bucket notification config
-func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
+func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool {
newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
for _, n := range b.QueueConfigs {
- if reflect.DeepEqual(n, newQueueConfig) {
- // Avoid adding duplicated entry
- return
+ if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range queueConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
}
}
b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+ return true
}
// AddLambda adds a given lambda config to the general bucket notification config
-func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
+func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool {
newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
for _, n := range b.LambdaConfigs {
- if reflect.DeepEqual(n, newLambdaConfig) {
- // Avoid adding duplicated entry
- return
+ if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range lambdaConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
}
}
b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+ return true
}
// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
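The Add* helpers now return false instead of silently dropping a config whose events overlap an existing entry for the same ARN and filter. A sketch, assuming a configured client; the SQS ARN components are placeholders:

package example

import (
	"github.com/minio/minio-go"
)

// watchCreates subscribes a queue to object-created events; the boolean
// result reports whether the config was accepted or overlapped an existing one.
func watchCreates(c *minio.Client) error {
	arn := minio.NewArn("minio", "sqs", "us-east-1", "1", "webhook")
	config := minio.NewNotificationConfig(arn)
	config.AddEvents(minio.ObjectCreatedAll)

	var bn minio.BucketNotification
	if ok := bn.AddQueue(config); !ok {
		return nil // duplicate/overlapping config, nothing to do
	}
	return c.SetBucketNotification("mybucket", bn)
}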
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index 84b6cfdf3..7db5a99af 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -59,12 +59,5 @@ const (
iso8601DateFormat = "20060102T150405Z"
)
-// Encryption headers stored along with the object.
-const (
- amzHeaderIV = "X-Amz-Meta-X-Amz-Iv"
- amzHeaderKey = "X-Amz-Meta-X-Amz-Key"
- amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc"
-)
-
// Storage class header constant.
const amzStorageClass = "X-Amz-Storage-Class"
diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go
index 4245fc065..cf2ba0537 100644
--- a/vendor/github.com/minio/minio-go/core.go
+++ b/vendor/github.com/minio/minio-go/core.go
@@ -21,8 +21,6 @@ import (
"context"
"io"
"strings"
-
- "github.com/minio/minio-go/pkg/policy"
)
// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
@@ -78,6 +76,8 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba
opts.ContentEncoding = v
} else if strings.ToLower(k) == "content-disposition" {
opts.ContentDisposition = v
+ } else if strings.ToLower(k) == "content-language" {
+ opts.ContentLanguage = v
} else if strings.ToLower(k) == "content-type" {
opts.ContentType = v
} else if strings.ToLower(k) == "cache-control" {
@@ -103,13 +103,7 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
- return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil)
-}
-
-// PutObjectPartWithMetadata - upload an object part with additional request metadata.
-func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader,
- size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) {
- return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata)
+ return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
@@ -131,12 +125,12 @@ func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
}
// GetBucketPolicy - fetches bucket access policy for a given bucket.
-func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
+func (c Core) GetBucketPolicy(bucket string) (string, error) {
return c.getBucketPolicy(bucket)
}
// PutBucketPolicy - applies a new bucket access policy for a given bucket.
-func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error {
+func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error {
return c.putBucketPolicy(bucket, bucketPolicy)
}
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
index c4156c293..c8236d69b 100644
--- a/vendor/github.com/minio/minio-go/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/functional_tests.go
@@ -22,7 +22,6 @@ package main
import (
"bytes"
"context"
- "encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -45,7 +44,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/minio/minio-go/pkg/encrypt"
- "github.com/minio/minio-go/pkg/policy"
)
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
@@ -707,13 +705,12 @@ func testPutObjectWithMetadata() {
successLogger(testName, function, args, startTime).Info()
}
-// Test put object with streaming signature.
-func testPutObjectStreaming() {
+func testPutObjectWithContentLanguage() {
// initialize logging params
objectName := "test-object"
startTime := time.Now()
testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader,size,opts)"
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": objectName,
@@ -752,21 +749,29 @@ func testPutObjectStreaming() {
return
}
- // Upload an object.
- sizes := []int64{0, 64*1024 - 1, 64 * 1024}
+ data := bytes.Repeat([]byte("a"), int(0))
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{
+ ContentLanguage: "en-US",
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
- for _, size := range sizes {
- data := bytes.Repeat([]byte("a"), int(size))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
- return
- }
+ if n != 0 {
+ logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err)
+ return
+ }
- if n != size {
- logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err)
- return
- }
+ objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if objInfo.Metadata.Get("Content-Language") != "en-US" {
+ logError(testName, function, args, startTime, "", "Expected content-language 'en-US' doesn't match with StatObject return value", err)
+ return
}
// Delete all objects and buckets
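
The new testPutObjectWithContentLanguage above boils down to the following round trip: set PutObjectOptions.ContentLanguage on upload, then read the stored header back through ObjectInfo.Metadata. A minimal sketch, assuming a connected *minio.Client and an existing bucket (names are hypothetical):

package example

import (
	"bytes"
	"fmt"

	"github.com/minio/minio-go"
)

// putWithContentLanguage stores an empty object with a Content-Language
// header and verifies it via StatObject.
func putWithContentLanguage(c *minio.Client) error {
	_, err := c.PutObject("my-bucket", "lang-object", bytes.NewReader(nil), 0,
		minio.PutObjectOptions{ContentLanguage: "en-US"})
	if err != nil {
		return err
	}
	info, err := c.StatObject("my-bucket", "lang-object", minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	// The stored header is surfaced through ObjectInfo.Metadata.
	if got := info.Metadata.Get("Content-Language"); got != "en-US" {
		return fmt.Errorf("unexpected Content-Language %q", got)
	}
	return nil
}
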
@@ -778,23 +783,25 @@ func testPutObjectStreaming() {
successLogger(testName, function, args, startTime).Info()
}
-// Test listing partially uploaded objects.
-func testListPartiallyUploaded() {
+// Test put object with streaming signature.
+func testPutObjectStreaming() {
// initialize logging params
+ objectName := "test-object"
startTime := time.Now()
testName := getFuncName()
- function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ function := "PutObject(bucketName, objectName, reader,size,opts)"
args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "isRecursive": "",
+ "bucketName": "",
+ "objectName": objectName,
+ "size": -1,
+ "opts": "",
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
- c, err := minio.New(
+ c, err := minio.NewV4(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
@@ -805,16 +812,15 @@ func testListPartiallyUploaded() {
return
}
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
-
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
@@ -822,46 +828,19 @@ func testListPartiallyUploaded() {
return
}
- bufSize := dataFileMap["datafile-65-MB"]
- r := bytes.NewReader(bytes.Repeat([]byte("0"), bufSize*2))
+ // Upload an object.
+ sizes := []int64{0, 64*1024 - 1, 64 * 1024}
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, cerr := io.CopyN(writer, r, (int64(bufSize)*2)/25)
- if cerr != nil {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- i++
- r.Seek(0, 0)
+ for _, size := range sizes {
+ data := bytes.Repeat([]byte("a"), int(size))
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+ return
}
- writer.CloseWithError(errors.New("proactively closed to be verified later"))
- }()
-
- objectName := bucketName + "-resumable"
- args["objectName"] = objectName
- _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize*2), minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err == nil {
- logError(testName, function, args, startTime, "", "PutObject should fail", err)
- return
- }
- if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- logError(testName, function, args, startTime, "", "String not found in PutObject output", err)
- return
- }
-
- doneCh := make(chan struct{})
- defer close(doneCh)
- isRecursive := true
- args["isRecursive"] = isRecursive
-
- multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
- for multiPartObject := range multiPartObjectCh {
- if multiPartObject.Err != nil {
- logError(testName, function, args, startTime, "", "Multipart object error", multiPartObject.Err)
+ if n != size {
+ logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err)
return
}
}
@@ -1101,27 +1080,26 @@ func testGetObjectClosedTwice() {
successLogger(testName, function, args, startTime).Info()
}
-// Test removing multiple objects with Remove API
-func testRemoveMultipleObjects() {
- // initialize logging params
+// Test RemoveObjectsWithContext request context cancels after timeout
+func testRemoveObjectsWithContext() {
+ // Initialize logging params.
startTime := time.Now()
testName := getFuncName()
- function := "RemoveObjects(bucketName, objectsCh)"
+ function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)"
args := map[string]interface{}{
"bucketName": "",
}
- // Seed random based on current time.
+ // Seed random based on current time.
rand.Seed(time.Now().Unix())
- // Instantiate new minio client object.
+ // Instantiate new minio client.
c, err := minio.New(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableHTTPS)),
)
-
if err != nil {
logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
return
@@ -1129,7 +1107,6 @@ func testRemoveMultipleObjects() {
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
@@ -1141,19 +1118,16 @@ func testRemoveMultipleObjects() {
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
}
+ // Generate put data.
r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
- // Multi remove of 1100 objects
- nrObjects := 200
-
+ // Multi remove of 20 objects.
+ nrObjects := 20
objectsCh := make(chan string)
-
go func() {
defer close(objectsCh)
- // Upload objects and send them to objectsCh
for i := 0; i < nrObjects; i++ {
objectName := "sample" + strconv.Itoa(i) + ".txt"
_, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
@@ -1164,35 +1138,52 @@ func testRemoveMultipleObjects() {
objectsCh <- objectName
}
}()
+ // Set context to cancel in 1 nanosecond.
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
- // Call RemoveObjects API
- errorCh := c.RemoveObjects(bucketName, objectsCh)
-
- // Check if errorCh doesn't receive any error
+ // Call RemoveObjectsWithContext API with short timeout.
+ errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
+ // Check for error.
+ select {
+ case r := <-errorCh:
+ if r.Err == nil {
+ logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err)
+ return
+ }
+ }
+ // Set context with longer timeout.
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+ defer cancel()
+ // Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed.
+ errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
select {
case r, more := <-errorCh:
- if more {
+ if more || r.Err != nil {
logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
return
}
}
- // Delete all objects and buckets
+ // Delete all objects and buckets.
if err = cleanupBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "Cleanup failed", err)
return
}
-
successLogger(testName, function, args, startTime).Info()
}
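
The RemoveObjectsWithContext API introduced here streams object names in and removal errors out, aborting once the context expires. A minimal usage sketch under a sane timeout, with a hypothetical bucket and name list:

package example

import (
	"context"
	"time"

	"github.com/minio/minio-go"
)

// removeAll streams names into RemoveObjectsWithContext and drains the
// error channel, stopping early if ctx expires.
func removeAll(c *minio.Client, bucket string, names []string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range names {
			objectsCh <- name
		}
	}()

	for rErr := range c.RemoveObjectsWithContext(ctx, bucket, objectsCh) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}
	return nil
}
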
-// Tests removing partially uploaded objects.
-func testRemovePartiallyUploaded() {
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
- function := "RemoveIncompleteUpload(bucketName, objectName)"
- args := map[string]interface{}{}
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1204,6 +1195,7 @@ func testRemovePartiallyUploaded() {
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableHTTPS)),
)
+
if err != nil {
logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
return
@@ -1226,40 +1218,39 @@ func testRemovePartiallyUploaded() {
return
}
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ // Multi remove of 200 objects
+ nrObjects := 200
+
+ objectsCh := make(chan string)
- reader, writer := io.Pipe()
go func() {
- i := 0
- for i < 25 {
- _, cerr := io.CopyN(writer, r, 128*1024)
- if cerr != nil {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ continue
}
- i++
- r.Seek(0, 0)
+ objectsCh <- objectName
}
- writer.CloseWithError(errors.New("proactively closed to be verified later"))
}()
- objectName := bucketName + "-resumable"
- args["objectName"] = objectName
+ // Call RemoveObjects API
+ errorCh := c.RemoveObjects(bucketName, objectsCh)
- _, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err == nil {
- logError(testName, function, args, startTime, "", "PutObject should fail", err)
- return
- }
- if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- logError(testName, function, args, startTime, "", "String not found", err)
- return
- }
- err = c.RemoveIncompleteUpload(bucketName, objectName)
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
- return
+ // Check if errorCh doesn't receive any error
+ select {
+ case r, more := <-errorCh:
+ if more {
+ logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+ return
+ }
}
+
// Delete all objects and buckets
if err = cleanupBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "Cleanup failed", err)
@@ -1912,6 +1903,14 @@ func testGetObjectReadSeekFunctional() {
return
}
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ }()
+
// Generate 33K of data.
bufSize := dataFileMap["datafile-33-kB"]
var reader = getDataReader("datafile-33-kB")
@@ -1938,14 +1937,6 @@ func testGetObjectReadSeekFunctional() {
return
}
- defer func() {
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- }()
-
// Read the data back
r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
@@ -2127,7 +2118,7 @@ func testGetObjectReadAtFunctional() {
buf3 := make([]byte, 512)
buf4 := make([]byte, 512)
- // Test readAt before stat is called.
+ // Test readAt before stat is called such that objectInfo doesn't change.
m, err := r.ReadAt(buf1, offset)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAt failed", err)
@@ -2167,6 +2158,7 @@ func testGetObjectReadAtFunctional() {
logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
return
}
+
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
@@ -2411,9 +2403,10 @@ func testPresignedPostPolicy() {
}
expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
+ expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
if val, ok := res.Header["Location"]; ok {
- if val[0] != expectedLocation {
+ if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
return
}
@@ -2588,6 +2581,10 @@ func testCopyObject() {
return
}
+ // Close all the get readers before proceeding with CopyObject operations.
+ r.Close()
+ readerCopy.Close()
+
// CopyObject again but with wrong conditions
src = minio.NewSourceInfo(bucketName, objectName, nil)
err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
@@ -2608,6 +2605,37 @@ func testCopyObject() {
return
}
+ // Perform the Copy which should update only metadata.
+ src = minio.NewSourceInfo(bucketName, objectName, nil)
+ dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{
+ "Copy": "should be same",
+ })
+ args["dst"] = dst
+ args["src"] = src
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err)
+ return
+ }
+
+ stOpts := minio.StatObjectOptions{}
+ stOpts.SetMatchETag(objInfo.ETag)
+ objInfo, err = c.StatObject(bucketName, objectName, stOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err)
+ return
+ }
+
+ if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" {
+ logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err)
+ return
+ }
+
// Delete all objects and buckets
if err = cleanupBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "Cleanup failed", err)
@@ -2620,23 +2648,207 @@ func testCopyObject() {
successLogger(testName, function, args, startTime).Info()
}
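
The metadata-only copy added to testCopyObject relies on CopyObject's server-side REPLACE behaviour when the destination carries new user metadata: the object data stays in place and only the metadata is rewritten. A minimal sketch, assuming the object already exists:

package example

import (
	"github.com/minio/minio-go"
)

// replaceMetadata copies an object onto itself with new user metadata;
// the data is untouched and only the x-amz-meta-* headers are rewritten.
func replaceMetadata(c *minio.Client, bucket, object string) error {
	src := minio.NewSourceInfo(bucket, object, nil)
	dst, err := minio.NewDestinationInfo(bucket, object, nil, map[string]string{
		"Copy": "should be same", // stored as x-amz-meta-copy
	})
	if err != nil {
		return err
	}
	return c.CopyObject(dst, src)
}
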
-// TestEncryptionPutGet tests client side encryption
-func testEncryptionPutGet() {
+// Tests SSE-C get object ReaderSeeker interface methods.
+func testEncryptedGetObjectReadSeekFunctional() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
- function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "cbcMaterials": "",
- "metadata": "",
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
+
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ }()
+
+ // Generate 65MiB of data.
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
+ return
+ }
+
+ // This following function helps us to compare data from the reader after seek
+ // with the data from the original buffer
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ }
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, 0, io.EOF, false, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
+ // Test with invalid whence
+ {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ if err != nil && testCase.err == nil {
+ // We expected success.
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ if err == nil && testCase.err != nil {
+ // We expected failure, but got success.
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ if err != nil && testCase.err != nil {
+ if err.Error() != testCase.err.Error() {
+ // We expect a specific error
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+ return
+ }
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+ logError(testName, function, args, startTime, "",
+ fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+ return
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
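
The seek table above pins down *minio.Object's Seek semantics for SSE-C objects: whence values 0/1/2, io.EOF on overshoot, and errors on negative positions. As a sketch of the same behaviour, fetching the last n bytes by seeking from the end, assuming the same hypothetical PBKDF password as the test:

package example

import (
	"io"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// readTail seeks n bytes back from the end of an SSE-C object and reads
// the remainder; io.SeekEnd corresponds to whence value 2 in the table.
func readTail(c *minio.Client, bucket, object string, n int64) ([]byte, error) {
	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucket+object))
	r, err := c.GetObject(bucket, object, minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		return nil, err
	}
	defer r.Close()

	if _, err := r.Seek(-n, io.SeekEnd); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	_, err = io.ReadFull(r, buf)
	return buf, err
}
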
+
+// Tests SSE-C get object ReaderAt interface methods.
+func testEncryptedGetObjectReadAtFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
// Seed random based on current time.
rand.Seed(time.Now().Unix())
- // Instantiate new minio client object
- c, err := minio.NewV4(
+ // Instantiate new minio client object.
+ c, err := minio.New(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
@@ -2664,104 +2876,235 @@ func testEncryptionPutGet() {
return
}
- // Generate a symmetric key
- symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+ // Generate 65MiB of data.
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
+ defer reader.Close()
- // Generate an assymmetric key from predefine public and private certificates
- privateKey, err := hex.DecodeString(
- "30820277020100300d06092a864886f70d0101010500048202613082025d" +
- "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
- "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
- "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
- "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
- "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
- "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
- "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
- "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
- "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
- "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
- "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
- "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
- "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
- "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
- "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
- "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
- "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
- "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
- "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
- "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
- "9945cb5c7d")
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+ buf, err := ioutil.ReadAll(reader)
if err != nil {
- logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
}
- publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
- "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
- "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
- "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
- "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
- "80a89e43f29b570203010001")
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
if err != nil {
- logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
return
}
- // Generate an asymmetric key
- asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+ // read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{
+ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+ })
if err != nil {
- logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called such that objectInfo doesn't change.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, n)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, n+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// TestEncryptionPutGet tests client side encryption
+func testEncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
return
}
testCases := []struct {
- buf []byte
- encKey encrypt.Key
+ buf []byte
}{
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
}
+ const password = "correct horse battery staple" // https://xkcd.com/936/
+
for i, testCase := range testCases {
// Generate a random object name
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
// Secured object
- cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
- args["cbcMaterials"] = cbcMaterials
-
- if err != nil {
- logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
- return
- }
+ sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ args["sse"] = sse
// Put encrypted data
- _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials)
+ _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
if err != nil {
logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
return
}
// Read the data back
- r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
if err != nil {
logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
return
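
With the CBC materials gone, encryption in 6.0.0 is SSE-C driven entirely through PutObjectOptions and GetObjectOptions. A minimal round-trip sketch with a hypothetical password, deriving the key from the bucket/object pair the same way the tests do:

package example

import (
	"bytes"
	"io/ioutil"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// sseRoundTrip uploads and downloads an SSE-C object; the same
// password/salt pair must be presented on both PutObject and GetObject.
func sseRoundTrip(c *minio.Client, bucket, object string, payload []byte) ([]byte, error) {
	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucket+object))

	if _, err := c.PutObject(bucket, object, bytes.NewReader(payload), int64(len(payload)),
		minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
		return nil, err
	}

	r, err := c.GetObject(bucket, object, minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}
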
@@ -2801,13 +3144,13 @@ func testEncryptionFPut() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
- function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)"
+ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "filePath": "",
- "contentType": "",
- "cbcMaterials": "",
+ "bucketName": "",
+ "objectName": "",
+ "filePath": "",
+ "contentType": "",
+ "sse": "",
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2841,98 +3184,36 @@ func testEncryptionFPut() {
return
}
- // Generate a symmetric key
- symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
- // Generate an assymmetric key from predefine public and private certificates
- privateKey, err := hex.DecodeString(
- "30820277020100300d06092a864886f70d0101010500048202613082025d" +
- "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
- "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
- "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
- "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
- "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
- "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
- "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
- "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
- "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
- "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
- "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
- "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
- "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
- "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
- "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
- "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
- "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
- "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
- "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
- "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
- "9945cb5c7d")
-
- if err != nil {
- logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
- return
- }
-
- publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
- "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
- "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
- "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
- "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
- "80a89e43f29b570203010001")
- if err != nil {
- logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
- return
- }
-
- // Generate an asymmetric key
- asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
- return
- }
-
// Object custom metadata
customContentType := "custom/contenttype"
args["metadata"] = customContentType
testCases := []struct {
- buf []byte
- encKey encrypt.Key
+ buf []byte
}{
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
- }
-
+ {buf: bytes.Repeat([]byte("F"), 0)},
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ const password = "correct horse battery staple" // https://xkcd.com/936/
for i, testCase := range testCases {
// Generate a random object name
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
// Secured object
- cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
- args["cbcMaterials"] = cbcMaterials
+ sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ args["sse"] = sse
- if err != nil {
- logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
- return
- }
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
file, err := os.Create(fileName)
@@ -2947,13 +3228,13 @@ func testEncryptionFPut() {
}
file.Close()
// Put encrypted data
- if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil {
+ if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
return
}
// Read the data back
- r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
if err != nil {
logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
return
@@ -3101,7 +3382,7 @@ func testFunctional() {
startTime := time.Now()
testName := getFuncName()
function := "testFunctional()"
- function_all := ""
+ functionAll := ""
args := map[string]interface{}{}
// Seed random based on current time.
@@ -3129,7 +3410,7 @@ func testFunctional() {
// Make a new bucket.
function = "MakeBucket(bucketName, region)"
- function_all = "MakeBucket(bucketName, region)"
+ functionAll = "MakeBucket(bucketName, region)"
args["bucketName"] = bucketName
err = c.MakeBucket(bucketName, "us-east-1")
@@ -3158,7 +3439,7 @@ func testFunctional() {
// Verify if bucket exits and you have access.
var exists bool
function = "BucketExists(bucketName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
}
@@ -3174,120 +3455,126 @@ func testFunctional() {
}
// Asserting the default bucket policy.
- function = "GetBucketPolicy(bucketName, objectPrefix)"
- function_all += ", " + function
+ function = "GetBucketPolicy(bucketName)"
+ functionAll += ", " + function
args = map[string]interface{}{
- "bucketName": bucketName,
- "objectPrefix": "",
+ "bucketName": bucketName,
}
- policyAccess, err := c.GetBucketPolicy(bucketName, "")
-
+ nilPolicy, err := c.GetBucketPolicy(bucketName)
if err != nil {
logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
return
}
- if policyAccess != "none" {
- logError(testName, function, args, startTime, "", "policy should be set to none", err)
+ if nilPolicy != "" {
+ logError(testName, function, args, startTime, "", "policy should be set to nil", err)
return
}
// Set the bucket policy to 'public readonly'.
- function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
- function_all += ", " + function
+ function = "SetBucketPolicy(bucketName, readOnlyPolicy)"
+ functionAll += ", " + function
+
+ readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}`
+
args = map[string]interface{}{
"bucketName": bucketName,
- "objectPrefix": "",
- "bucketPolicy": policy.BucketPolicyReadOnly,
+ "bucketPolicy": readOnlyPolicy,
}
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
+
+ err = c.SetBucketPolicy(bucketName, readOnlyPolicy)
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
}
// should return policy `readonly`.
- function = "GetBucketPolicy(bucketName, objectPrefix)"
- function_all += ", " + function
+ function = "GetBucketPolicy(bucketName)"
+ functionAll += ", " + function
args = map[string]interface{}{
- "bucketName": bucketName,
- "objectPrefix": "",
+ "bucketName": bucketName,
}
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ readOnlyPolicyRet, err := c.GetBucketPolicy(bucketName)
if err != nil {
logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
return
}
- if policyAccess != "readonly" {
+
+ if strings.Compare(readOnlyPolicyRet, readOnlyPolicy) != 0 {
logError(testName, function, args, startTime, "", "policy should be set to readonly", err)
return
}
// Make the bucket 'public writeonly'.
- function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
- function_all += ", " + function
+ function = "SetBucketPolicy(bucketName, writeOnlyPolicy)"
+ functionAll += ", " + function
+
+ writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}`
+
args = map[string]interface{}{
"bucketName": bucketName,
- "objectPrefix": "",
- "bucketPolicy": policy.BucketPolicyWriteOnly,
+ "bucketPolicy": writeOnlyPolicy,
}
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
+ err = c.SetBucketPolicy(bucketName, writeOnlyPolicy)
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
}
// should return policy `writeonly`.
- function = "GetBucketPolicy(bucketName, objectPrefix)"
- function_all += ", " + function
+ function = "GetBucketPolicy(bucketName)"
+ functionAll += ", " + function
args = map[string]interface{}{
- "bucketName": bucketName,
- "objectPrefix": "",
+ "bucketName": bucketName,
}
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ writeOnlyPolicyRet, err := c.GetBucketPolicy(bucketName)
if err != nil {
logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
return
}
- if policyAccess != "writeonly" {
+
+ if strings.Compare(writeOnlyPolicyRet, writeOnlyPolicy) != 0 {
logError(testName, function, args, startTime, "", "policy should be set to writeonly", err)
return
}
+
// Make the bucket 'public read/write'.
- function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
- function_all += ", " + function
+ function = "SetBucketPolicy(bucketName, readWritePolicy)"
+ functionAll += ", " + function
+
+ readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::` + bucketName + `"],"Sid":""}]}`
+
args = map[string]interface{}{
"bucketName": bucketName,
- "objectPrefix": "",
- "bucketPolicy": policy.BucketPolicyReadWrite,
+ "bucketPolicy": readWritePolicy,
}
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ err = c.SetBucketPolicy(bucketName, readWritePolicy)
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
}
// should return policy `readwrite`.
- function = "GetBucketPolicy(bucketName, objectPrefix)"
- function_all += ", " + function
+ function = "GetBucketPolicy(bucketName)"
+ functionAll += ", " + function
args = map[string]interface{}{
- "bucketName": bucketName,
- "objectPrefix": "",
+ "bucketName": bucketName,
}
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
-
+ readWritePolicyRet, err := c.GetBucketPolicy(bucketName)
if err != nil {
logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
return
}
- if policyAccess != "readwrite" {
+
+ if strings.Compare(readWritePolicyRet, readWritePolicy) != 0 {
logError(testName, function, args, startTime, "", "policy should be set to readwrite", err)
return
}
+
// List all buckets.
function = "ListBuckets()"
- function_all += ", " + function
+ functionAll += ", " + function
args = nil
buckets, err := c.ListBuckets()
@@ -3320,7 +3607,7 @@ func testFunctional() {
buf := bytes.Repeat([]byte("f"), 1<<19)
function = "PutObject(bucketName, objectName, reader, contentType)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3363,7 +3650,7 @@ func testFunctional() {
isRecursive := true // Recursive is true.
function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3384,7 +3671,7 @@ func testFunctional() {
objFound = false
isRecursive = true // Recursive is true.
function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3405,7 +3692,7 @@ func testFunctional() {
incompObjNotFound := true
function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3424,7 +3711,7 @@ func testFunctional() {
}
function = "GetObject(bucketName, objectName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3446,9 +3733,10 @@ func testFunctional() {
logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
return
}
+ newReader.Close()
function = "FGetObject(bucketName, objectName, fileName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3462,7 +3750,7 @@ func testFunctional() {
}
function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": "",
@@ -3475,7 +3763,7 @@ func testFunctional() {
// Generate presigned HEAD object url.
function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3504,7 +3792,7 @@ func testFunctional() {
resp.Body.Close()
function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": "",
@@ -3518,7 +3806,7 @@ func testFunctional() {
// Generate presigned GET object url.
function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3592,7 +3880,7 @@ func testFunctional() {
}
function = "PresignedPutObject(bucketName, objectName, expires)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": "",
@@ -3605,7 +3893,7 @@ func testFunctional() {
}
function = "PresignedPutObject(bucketName, objectName, expires)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName + "-presigned",
@@ -3656,7 +3944,7 @@ func testFunctional() {
}
function = "RemoveObject(bucketName, objectName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -3692,7 +3980,7 @@ func testFunctional() {
}
function = "RemoveBucket(bucketName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
}
@@ -3720,7 +4008,7 @@ func testFunctional() {
logError(testName, function, args, startTime, "", "File Remove failed", err)
return
}
- successLogger(testName, function_all, args, startTime).Info()
+ successLogger(testName, functionAll, args, startTime).Info()
}
// Test for validating GetObject Reader* methods functioning when the
@@ -3916,6 +4204,7 @@ func testPutObjectUploadSeekedObject() {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
+ defer obj.Close()
n, err = obj.Seek(int64(offset), 0)
if err != nil {
@@ -4110,89 +4399,6 @@ func testGetObjectClosedTwiceV2() {
successLogger(testName, function, args, startTime).Info()
}
-// Tests removing partially uploaded objects.
-func testRemovePartiallyUploadedV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "RemoveIncompleteUpload(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
-
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, cerr := io.CopyN(writer, r, 128*1024)
- if cerr != nil {
- logError(testName, function, args, startTime, "", "Copy failed", cerr)
- return
- }
- i++
- r.Seek(0, 0)
- }
- writer.CloseWithError(errors.New("proactively closed to be verified later"))
- }()
-
- objectName := bucketName + "-resumable"
- args["objectName"] = objectName
-
- _, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err == nil {
- logError(testName, function, args, startTime, "", "PutObject should fail", err)
- return
- }
- if err.Error() != "proactively closed to be verified later" {
- logError(testName, function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err)
- return
- }
- err = c.RemoveIncompleteUpload(bucketName, objectName)
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
// Tests FPutObject hidden contentType setting
func testFPutObjectV2() {
// initialize logging params
@@ -4504,6 +4710,7 @@ func testGetObjectReadSeekFunctionalV2() {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
+ defer r.Close()
st, err := r.Stat()
if err != nil {
@@ -4667,6 +4874,7 @@ func testGetObjectReadAtFunctionalV2() {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
+ defer r.Close()
st, err := r.Stat()
if err != nil {
@@ -4839,6 +5047,7 @@ func testCopyObjectV2() {
logError(testName, function, args, startTime, "", "Stat failed", err)
return
}
+ r.Close()
// Copy Source
src := minio.NewSourceInfo(bucketName, objectName, nil)
@@ -4921,6 +5130,10 @@ func testCopyObjectV2() {
return
}
+ // Close all the readers.
+ r.Close()
+ readerCopy.Close()
+
// CopyObject again but with wrong conditions
src = minio.NewSourceInfo(bucketName, objectName, nil)
err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
@@ -5147,6 +5360,106 @@ func testCompose10KSourcesV2() {
testComposeMultipleSources(c)
}
+func testEncryptedEmptyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 0
+ var buf []byte // Empty buffer
+ args["objectName"] = "object"
+ _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ // 2. Test CopyObject for an empty object
+ dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil)
+ if err != nil {
+ args["objectName"] = "new-object"
+ function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)"
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+ srcInfo := minio.NewSourceInfo(bucketName, "object", sse)
+ if err = c.CopyObject(dstInfo, srcInfo); err != nil {
+ function = "CopyObject(dstInfo, srcInfo)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // 3. Test Key rotation
+ newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object"))
+ dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil)
+ if err != nil {
+ args["objectName"] = "new-object"
+ function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)"
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+
+ srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse)
+ if err = c.CopyObject(dstInfo, srcInfo); err != nil {
+ function = "CopyObject(dstInfo, srcInfo)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err)
+ return
+ }
+
+ // 4. Download the object.
+ reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err)
+ return
+ }
+ // Delete all objects and buckets
+ delete(args, "objectName")
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
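
Key rotation, exercised both here and in testEncryptedCopyObjectWrapper below, is a server-side copy of the object onto itself with the old key on the source and the new key on the destination. A minimal sketch with hypothetical passwords:

package example

import (
	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// rotateKey re-encrypts an SSE-C object in place by copying it onto
// itself: the source decrypts with the old key, the destination
// re-encrypts with the new one.
func rotateKey(c *minio.Client, bucket, object string) error {
	oldKey := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucket+object))
	newKey := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucket+object))

	src := minio.NewSourceInfo(bucket, object, oldKey)
	dst, err := minio.NewDestinationInfo(bucket, object, newKey, nil)
	if err != nil {
		return err
	}
	return c.CopyObject(dst, src)
}
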
+
func testEncryptedCopyObjectWrapper(c *minio.Client) {
// initialize logging params
startTime := time.Now()
@@ -5163,26 +5476,24 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) {
return
}
- key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
- key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
 // 1. Create an SSE-C encrypted object to copy, by uploading it
const srcSize = 1024 * 1024
buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
- metadata := make(map[string]string)
- for k, v := range key1.GetSSEHeaders() {
- metadata[k] = v
- }
- _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil})
+ _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+ ServerSideEncryption: sseSrc,
+ })
if err != nil {
logError(testName, function, args, startTime, "", "PutObject call failed", err)
return
}
// 2. copy object and change encryption key
- src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
+ src := minio.NewSourceInfo(bucketName, "srcObject", sseSrc)
args["source"] = src
- dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
+ dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil)
if err != nil {
logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
return
@@ -5196,17 +5507,12 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) {
}
// 3. get copied object and check if content is equal
- opts := minio.GetObjectOptions{}
- for k, v := range key2.GetSSEHeaders() {
- opts.Set(k, v)
- }
coreClient := minio.Core{c}
- reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts)
+ reader, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: sseDst})
if err != nil {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
- defer reader.Close()
decBytes, err := ioutil.ReadAll(reader)
if err != nil {
@@ -5217,6 +5523,75 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) {
logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
return
}
+ reader.Close()
+
+ // Test key rotation for source object in-place.
+ newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
+ dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+ args["destination"] = dst
+
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Get copied object and check if content is equal
+ reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err = ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
+ // Test in-place decryption.
+ dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+ args["destination"] = dst
+
+ src = minio.NewSourceInfo(bucketName, "srcObject", newSSE)
+ args["source"] = src
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Get copied decrypted object and check if content is equal
+ reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err = ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+
// Delete all objects and buckets
if err = cleanupBucket(bucketName, c); err != nil {
logError(testName, function, args, startTime, "", "Cleanup failed", err)
@@ -5270,9 +5645,64 @@ func testEncryptedCopyObjectV2() {
return
}
+ // c.TraceOn(os.Stderr)
testEncryptedCopyObjectWrapper(c)
}
+func testDecryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
+ if err = c.MakeBucket(bucketName, "us-east-1"); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
+ _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
+ ServerSideEncryption: encryption,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption))
+ args["source"] = src
+ dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+ args["destination"] = dst
+
+ if err = c.CopyObject(dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+ if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
func testUserMetadataCopying() {
// initialize logging params
startTime := time.Now()
@@ -5990,7 +6420,7 @@ func testFunctionalV2() {
startTime := time.Now()
testName := getFuncName()
function := "testFunctionalV2()"
- function_all := ""
+ functionAll := ""
args := map[string]interface{}{}
// Seed random based on current time.
@@ -6018,7 +6448,7 @@ func testFunctionalV2() {
location := "us-east-1"
// Make a new bucket.
function = "MakeBucket(bucketName, location)"
- function_all = "MakeBucket(bucketName, location)"
+ functionAll = "MakeBucket(bucketName, location)"
args = map[string]interface{}{
"bucketName": bucketName,
"location": location,
@@ -6049,7 +6479,7 @@ func testFunctionalV2() {
// Verify if bucket exists and you have access.
var exists bool
function = "BucketExists(bucketName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
}
@@ -6064,14 +6494,17 @@ func testFunctionalV2() {
}
// Make the bucket 'public read/write'.
- function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
- function_all += ", " + function
+ function = "SetBucketPolicy(bucketName, bucketPolicy)"
+ functionAll += ", " + function
+
+ readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads","s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `/*"],"Sid": ""}]}`
+
args = map[string]interface{}{
"bucketName": bucketName,
- "objectPrefix": "",
- "bucketPolicy": policy.BucketPolicyReadWrite,
+ "bucketPolicy": readWritePolicy,
}
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ err = c.SetBucketPolicy(bucketName, readWritePolicy)
+
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
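Note: with minio-go 6.0.0 the pkg/policy helpers are gone (their deletion appears further below), so SetBucketPolicy now receives the bucket policy itself as a raw JSON document. A read-only variant of the policy above, as an illustrative sketch built from the same statement shape:

    readOnlyPolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `/*"],"Sid": ""}]}`
    err = c.SetBucketPolicy(bucketName, readOnlyPolicy)
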
@@ -6079,7 +6512,7 @@ func testFunctionalV2() {
// List all buckets.
function = "ListBuckets()"
- function_all += ", " + function
+ functionAll += ", " + function
args = nil
buckets, err := c.ListBuckets()
if len(buckets) == 0 {
@@ -6145,7 +6578,7 @@ func testFunctionalV2() {
objFound := false
isRecursive := true // Recursive is true.
function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -6164,7 +6597,7 @@ func testFunctionalV2() {
incompObjNotFound := true
function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -6182,7 +6615,7 @@ func testFunctionalV2() {
}
function = "GetObject(bucketName, objectName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -6198,6 +6631,7 @@ func testFunctionalV2() {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
}
+ newReader.Close()
if !bytes.Equal(newReadBytes, buf) {
logError(testName, function, args, startTime, "", "Bytes mismatch", err)
@@ -6205,7 +6639,7 @@ func testFunctionalV2() {
}
function = "FGetObject(bucketName, objectName, fileName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -6219,7 +6653,7 @@ func testFunctionalV2() {
// Generate presigned HEAD object url.
function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -6248,7 +6682,7 @@ func testFunctionalV2() {
// Generate presigned GET object url.
function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -6316,7 +6750,7 @@ func testFunctionalV2() {
}
function = "PresignedPutObject(bucketName, objectName, expires)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName + "-presigned",
@@ -6350,7 +6784,7 @@ func testFunctionalV2() {
}
function = "GetObject(bucketName, objectName)"
- function_all += ", " + function
+ functionAll += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName + "-presigned",
@@ -6366,6 +6800,7 @@ func testFunctionalV2() {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
}
+ newReader.Close()
if !bytes.Equal(newReadBytes, buf) {
logError(testName, function, args, startTime, "", "Bytes mismatch", err)
@@ -6386,7 +6821,7 @@ func testFunctionalV2() {
logError(testName, function, args, startTime, "", "File removes failed", err)
return
}
- successLogger(testName, function_all, args, startTime).Info()
+ successLogger(testName, functionAll, args, startTime).Info()
}
// Test get object with GetObjectWithContext
@@ -6454,10 +6889,12 @@ func testGetObjectWithContext() {
logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
return
}
+
if _, err = r.Stat(); err == nil {
logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
return
}
+ r.Close()
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
args["ctx"] = ctx
@@ -6736,6 +7173,7 @@ func testGetObjectWithContextV2() {
logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
return
}
+ r.Close()
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
defer cancel()
@@ -6865,6 +7303,120 @@ func testFGetObjectWithContextV2() {
}
+// Test ListObjects V1 and V2 storage-class fields
+func testListObjects() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectPrefix": "",
+ "recursive": "true",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName1 := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ _, err = c.PutObject(bucketName, objectName1, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject1 call failed", err)
+ return
+ }
+
+ bufSize1 := dataFileMap["datafile-33-kB"]
+ var reader1 = getDataReader("datafile-33-kB")
+ defer reader1.Close()
+ objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ _, err = c.PutObject(bucketName, objectName2, reader1, int64(bufSize1), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject2 call failed", err)
+ return
+ }
+
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{})
+ // Exit cleanly upon return.
+ defer close(doneCh)
+
+ // check for storage-class from ListObjects result
+ for objInfo := range c.ListObjects(bucketName, "", true, doneCh) {
+ if objInfo.Err != nil {
+ logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
+ return
+ }
+ if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" {
+ logError(testName, function, args, startTime, "", "ListObjects doesn't return expected storage class", err)
+ return
+ }
+ if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" {
+ logError(testName, function, args, startTime, "", "ListObjects doesn't return expected storage class", err)
+ return
+ }
+ }
+
+ // check for storage-class from ListObjectsV2 result
+ for objInfo := range c.ListObjectsV2(bucketName, "", true, doneCh) {
+ if objInfo.Err != nil {
+ logError(testName, function, args, startTime, "", "ListObjectsV2 failed unexpectedly", objInfo.Err)
+ return
+ }
+ if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" {
+ logError(testName, function, args, startTime, "", "ListObjectsV2 doesn't return expected storage class", err)
+ return
+ }
+ if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" {
+ logError(testName, function, args, startTime, "", "ListObjectsV2 doesn't return expected storage class", err)
+ return
+ }
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
// Convert string to bool and always return false if any error
func mustParseBool(str string) bool {
b, err := strconv.ParseBool(str)
@@ -6889,7 +7441,6 @@ func main() {
if isFullMode() {
testMakeBucketErrorV2()
testGetObjectClosedTwiceV2()
- testRemovePartiallyUploadedV2()
testFPutObjectV2()
testMakeBucketRegionsV2()
testGetObjectReadSeekFunctionalV2()
@@ -6911,19 +7462,15 @@ func main() {
testPutObjectWithMetadata()
testPutObjectReadAt()
testPutObjectStreaming()
- testListPartiallyUploaded()
testGetObjectSeekEnd()
testGetObjectClosedTwice()
testRemoveMultipleObjects()
- testRemovePartiallyUploaded()
testFPutObjectMultipart()
testFPutObject()
testGetObjectReadSeekFunctional()
testGetObjectReadAtFunctional()
testPresignedPostPolicy()
testCopyObject()
- testEncryptionPutGet()
- testEncryptionFPut()
testComposeObjectErrorCases()
testCompose10KSources()
testUserMetadataCopying()
@@ -6938,11 +7485,19 @@ func main() {
testStorageClassMetadataPutObject()
testStorageClassInvalidMetadataPutObject()
testStorageClassMetadataCopyObject()
+ testPutObjectWithContentLanguage()
+ testListObjects()
// SSE-C tests will only work over TLS connection.
if tls {
+ testEncryptionPutGet()
+ testEncryptionFPut()
+ testEncryptedGetObjectReadAtFunctional()
+ testEncryptedGetObjectReadSeekFunctional()
testEncryptedCopyObjectV2()
testEncryptedCopyObject()
+ testEncryptedEmptyObject()
+ testDecryptedCopyObject()
}
} else {
testFunctional()
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
deleted file mode 100644
index b0f2d6e08..000000000
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package encrypt
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/cipher"
- "crypto/rand"
- "encoding/base64"
- "errors"
- "io"
-)
-
-// Crypt mode - encryption or decryption
-type cryptMode int
-
-const (
- encryptMode cryptMode = iota
- decryptMode
-)
-
-// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm
-type CBCSecureMaterials struct {
-
- // Data stream to encrypt/decrypt
- stream io.Reader
-
- // Last internal error
- err error
-
- // End of file reached
- eof bool
-
- // Holds initial data
- srcBuf *bytes.Buffer
-
- // Holds transformed data (encrypted or decrypted)
- dstBuf *bytes.Buffer
-
- // Encryption algorithm
- encryptionKey Key
-
- // Key to encrypts/decrypts data
- contentKey []byte
-
- // Encrypted form of contentKey
- cryptedKey []byte
-
- // Initialization vector
- iv []byte
-
- // matDesc - currently unused
- matDesc []byte
-
- // Indicate if we are going to encrypt or decrypt
- cryptMode cryptMode
-
- // Helper that encrypts/decrypts data
- blockMode cipher.BlockMode
-}
-
-// NewCBCSecureMaterials builds new CBC crypter module with
-// the specified encryption key (symmetric or asymmetric)
-func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
- if key == nil {
- return nil, errors.New("Unable to recognize empty encryption properties")
- }
- return &CBCSecureMaterials{
- srcBuf: bytes.NewBuffer([]byte{}),
- dstBuf: bytes.NewBuffer([]byte{}),
- encryptionKey: key,
- matDesc: []byte("{}"),
- }, nil
-
-}
-
-// Close implements closes the internal stream.
-func (s *CBCSecureMaterials) Close() error {
- closer, ok := s.stream.(io.Closer)
- if ok {
- return closer.Close()
- }
- return nil
-}
-
-// SetupEncryptMode - tells CBC that we are going to encrypt data
-func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
- // Set mode to encrypt
- s.cryptMode = encryptMode
-
- // Set underlying reader
- s.stream = stream
-
- s.eof = false
- s.srcBuf.Reset()
- s.dstBuf.Reset()
-
- var err error
-
- // Generate random content key
- s.contentKey = make([]byte, aes.BlockSize*2)
- if _, err := rand.Read(s.contentKey); err != nil {
- return err
- }
- // Encrypt content key
- s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey)
- if err != nil {
- return err
- }
- // Generate random IV
- s.iv = make([]byte, aes.BlockSize)
- if _, err = rand.Read(s.iv); err != nil {
- return err
- }
- // New cipher
- encryptContentBlock, err := aes.NewCipher(s.contentKey)
- if err != nil {
- return err
- }
-
- s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv)
-
- return nil
-}
-
-// SetupDecryptMode - tells CBC that we are going to decrypt data
-func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error {
- // Set mode to decrypt
- s.cryptMode = decryptMode
-
- // Set underlying reader
- s.stream = stream
-
- // Reset
- s.eof = false
- s.srcBuf.Reset()
- s.dstBuf.Reset()
-
- var err error
-
- // Get IV
- s.iv, err = base64.StdEncoding.DecodeString(iv)
- if err != nil {
- return err
- }
-
- // Get encrypted content key
- s.cryptedKey, err = base64.StdEncoding.DecodeString(key)
- if err != nil {
- return err
- }
-
- // Decrypt content key
- s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey)
- if err != nil {
- return err
- }
-
- // New cipher
- decryptContentBlock, err := aes.NewCipher(s.contentKey)
- if err != nil {
- return err
- }
-
- s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv)
- return nil
-}
-
-// GetIV - return randomly generated IV (per S3 object), base64 encoded.
-func (s *CBCSecureMaterials) GetIV() string {
- return base64.StdEncoding.EncodeToString(s.iv)
-}
-
-// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded.
-func (s *CBCSecureMaterials) GetKey() string {
- return base64.StdEncoding.EncodeToString(s.cryptedKey)
-}
-
-// GetDesc - user provided encryption material description in JSON (UTF8) format.
-func (s *CBCSecureMaterials) GetDesc() string {
- return string(s.matDesc)
-}
-
-// Fill buf with encrypted/decrypted data
-func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) {
- // Always fill buf from bufChunk at the end of this function
- defer func() {
- if s.err != nil {
- n, err = 0, s.err
- } else {
- n, err = s.dstBuf.Read(buf)
- }
- }()
-
- // Return
- if s.eof {
- return
- }
-
- // Fill dest buffer if its length is less than buf
- for !s.eof && s.dstBuf.Len() < len(buf) {
-
- srcPart := make([]byte, aes.BlockSize)
- dstPart := make([]byte, aes.BlockSize)
-
- // Fill src buffer
- for s.srcBuf.Len() < aes.BlockSize*2 {
- _, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize)
- if err != nil {
- break
- }
- }
-
- // Quit immediately for errors other than io.EOF
- if err != nil && err != io.EOF {
- s.err = err
- return
- }
-
- // Mark current encrypting/decrypting as finished
- s.eof = (err == io.EOF)
-
- if s.eof && s.cryptMode == encryptMode {
- if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil {
- s.err = err
- return
- }
- } else {
- _, _ = s.srcBuf.Read(srcPart)
- }
-
- // Crypt srcPart content
- for len(srcPart) > 0 {
-
- // Crypt current part
- s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize])
-
- // Unpad when this is the last part and we are decrypting
- if s.eof && s.cryptMode == decryptMode {
- dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize)
- if err != nil {
- s.err = err
- return
- }
- }
-
- // Send crypted data to dstBuf
- if _, wErr := s.dstBuf.Write(dstPart); wErr != nil {
- s.err = wErr
- return
- }
- // Move to the next part
- srcPart = srcPart[aes.BlockSize:]
- }
- }
- return
-}
-
-// Unpad a set of bytes following PKCS5 algorithm
-func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) {
- len := len(buf)
- if len == 0 {
- return nil, errors.New("buffer is empty")
- }
- pad := int(buf[len-1])
- if pad > len || pad > blockSize {
- return nil, errors.New("invalid padding size")
- }
- return buf[:len-pad], nil
-}
-
-// Pad a set of bytes following PKCS5 algorithm
-func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) {
- len := len(buf)
- pad := blockSize - (len % blockSize)
- padText := bytes.Repeat([]byte{byte(pad)}, pad)
- return append(buf, padText...), nil
-}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
deleted file mode 100644
index 482922ab7..000000000
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Package encrypt implements a generic interface to encrypt any stream of data.
-// currently this package implements two types of encryption
-// - Symmetric encryption using AES.
-// - Asymmetric encrytion using RSA.
-package encrypt
-
-import "io"
-
-// Materials - provides generic interface to encrypt any stream of data.
-type Materials interface {
-
- // Closes the wrapped stream properly, initiated by the caller.
- Close() error
-
- // Returns encrypted/decrypted data, io.Reader compatible.
- Read(b []byte) (int, error)
-
- // Get randomly generated IV, base64 encoded.
- GetIV() (iv string)
-
- // Get content encrypting key (cek) in encrypted form, base64 encoded.
- GetKey() (key string)
-
- // Get user provided encryption material description in
- // JSON (UTF8) format. This is not used, kept for future.
- GetDesc() (desc string)
-
- // Setup encrypt mode, further calls of Read() function
- // will return the encrypted form of data streamed
- // by the passed reader
- SetupEncryptMode(stream io.Reader) error
-
- // Setup decrypted mode, further calls of Read() function
- // will return the decrypted form of data streamed
- // by the passed reader
- SetupDecryptMode(stream io.Reader, iv string, key string) error
-}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
deleted file mode 100644
index 0ed95f5ff..000000000
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package encrypt
-
-import (
- "crypto/aes"
- "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "errors"
-)
-
-// Key - generic interface to encrypt/decrypt a key.
-// We use it to encrypt/decrypt content key which is the key
-// that encrypt/decrypt object data.
-type Key interface {
- // Encrypt data using to the set encryption key
- Encrypt([]byte) ([]byte, error)
- // Decrypt data using to the set encryption key
- Decrypt([]byte) ([]byte, error)
-}
-
-// SymmetricKey - encrypts data with a symmetric master key
-type SymmetricKey struct {
- masterKey []byte
-}
-
-// Encrypt passed bytes
-func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) {
- // Initialize an AES encryptor using a master key
- keyBlock, err := aes.NewCipher(s.masterKey)
- if err != nil {
- return []byte{}, err
- }
-
- // Pad the key before encryption
- plain, _ = pkcs5Pad(plain, aes.BlockSize)
-
- encKey := []byte{}
- encPart := make([]byte, aes.BlockSize)
-
- // Encrypt the passed key by block
- for {
- if len(plain) < aes.BlockSize {
- break
- }
- // Encrypt the passed key
- keyBlock.Encrypt(encPart, plain[:aes.BlockSize])
- // Add the encrypted block to the total encrypted key
- encKey = append(encKey, encPart...)
- // Pass to the next plain block
- plain = plain[aes.BlockSize:]
- }
- return encKey, nil
-}
-
-// Decrypt passed bytes
-func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
- // Initialize AES decrypter
- keyBlock, err := aes.NewCipher(s.masterKey)
- if err != nil {
- return nil, err
- }
-
- var plain []byte
- plainPart := make([]byte, aes.BlockSize)
-
- // Decrypt the encrypted data block by block
- for {
- if len(cipher) < aes.BlockSize {
- break
- }
- keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize])
- // Add the decrypted block to the total result
- plain = append(plain, plainPart...)
- // Pass to the next cipher block
- cipher = cipher[aes.BlockSize:]
- }
-
- // Unpad the resulted plain data
- plain, err = pkcs5Unpad(plain, aes.BlockSize)
- if err != nil {
- return nil, err
- }
-
- return plain, nil
-}
-
-// NewSymmetricKey generates a new encrypt/decrypt crypto using
-// an AES master key password
-func NewSymmetricKey(b []byte) *SymmetricKey {
- return &SymmetricKey{masterKey: b}
-}
-
-// AsymmetricKey - struct which encrypts/decrypts data
-// using RSA public/private certificates
-type AsymmetricKey struct {
- publicKey *rsa.PublicKey
- privateKey *rsa.PrivateKey
-}
-
-// Encrypt data using public key
-func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) {
- cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain)
- if err != nil {
- return nil, err
- }
- return cipher, nil
-}
-
-// Decrypt data using public key
-func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
- cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher)
- if err != nil {
- return nil, err
- }
- return cipher, nil
-}
-
-// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt
-// data using a pair for private and public key
-func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) {
- // Parse private key from passed data
- priv, err := x509.ParsePKCS8PrivateKey(privData)
- if err != nil {
- return nil, err
- }
- privKey, ok := priv.(*rsa.PrivateKey)
- if !ok {
- return nil, errors.New("not a valid private key")
- }
-
- // Parse public key from passed data
- pub, err := x509.ParsePKIXPublicKey(pubData)
- if err != nil {
- return nil, err
- }
-
- pubKey, ok := pub.(*rsa.PublicKey)
- if !ok {
- return nil, errors.New("not a valid public key")
- }
-
- // Associate the private key with the passed public key
- privKey.PublicKey = *pubKey
-
- return &AsymmetricKey{
- publicKey: pubKey,
- privateKey: privKey,
- }, nil
-}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go
new file mode 100644
index 000000000..2d3c70f00
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go
@@ -0,0 +1,195 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "net/http"
+
+ "golang.org/x/crypto/argon2"
+)
+
+const (
+ // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
+ sseGenericHeader = "X-Amz-Server-Side-Encryption"
+
+ // sseKmsKeyID is the AWS SSE-KMS key id.
+ sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id"
+ // sseEncryptionContext is the AWS SSE-KMS Encryption Context data.
+ sseEncryptionContext = sseGenericHeader + "-Encryption-Context"
+
+ // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
+ sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm"
+ // sseCustomerKey is the AWS SSE-C encryption key HTTP header key.
+ sseCustomerKey = sseGenericHeader + "-Customer-Key"
+ // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
+ sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5"
+
+ // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
+ sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
+ sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
+ sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
+)
+
+// PBKDF creates an SSE-C key from the provided password and salt.
+// PBKDF is a password-based key derivation function
+// which can be used to derive a high-entropy cryptographic
+// key from a low-entropy password and a salt.
+type PBKDF func(password, salt []byte) ServerSide
+
+// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
+// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
+var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
+ sse := ssec{}
+ copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
+ return sse
+}
+
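As a usage sketch, mirroring the functional tests in this commit: derive a per-object key (the tests salt with bucketName+objectName, so every object gets a distinct key) and pass it through PutObjectOptions. The client, names and password below are illustrative assumptions:

    sse := encrypt.DefaultPBKDF([]byte("my-secret-password"), []byte(bucketName+objectName))
    // Upload with SSE-C; the same ServerSide value must be presented on reads.
    _, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(len(data)),
        minio.PutObjectOptions{ServerSideEncryption: sse})
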
+// Type is the server-side-encryption method. It represents one of
+// the following encryption methods:
+// - SSE-C: server-side-encryption with customer provided keys
+// - KMS: server-side-encryption with managed keys
+// - S3: server-side-encryption using S3 storage encryption
+type Type string
+
+const (
+ // SSEC represents server-side-encryption with customer provided keys
+ SSEC Type = "SSE-C"
+ // KMS represents server-side-encryption with managed keys
+ KMS Type = "KMS"
+ // S3 represents server-side-encryption using S3 storage encryption
+ S3 Type = "S3"
+)
+
+// ServerSide is a form of S3 server-side-encryption.
+type ServerSide interface {
+ // Type returns the server-side-encryption method.
+ Type() Type
+
+ // Marshal adds encryption headers to the provided HTTP headers.
+ // It marks an HTTP request as server-side-encryption request
+ // and inserts the required data into the headers.
+ Marshal(h http.Header)
+}
+
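Marshal is normally invoked by the client while building a request, but its effect is easy to inspect. For instance, with the SSE-S3 implementation further below (a sketch):

    h := make(http.Header)
    encrypt.NewSSE().Marshal(h)
    // h now carries "X-Amz-Server-Side-Encryption: AES256".
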
+// NewSSE returns a server-side-encryption using S3 storage encryption.
+// Using SSE-S3 the server will encrypt the object with server-managed keys.
+func NewSSE() ServerSide { return s3{} }
+
+// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided key ID and context.
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+ if context == nil {
+ return kms{key: keyID, hasContext: false}, nil
+ }
+ serializedContext, err := json.Marshal(context)
+ if err != nil {
+ return nil, err
+ }
+ return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
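For example (the key ID and context below are hypothetical placeholders; a nil context simply omits the Encryption-Context header, per Marshal further below):

    sse, err := encrypt.NewSSEKMS("my-kms-key-id", map[string]string{"purpose": "backup"})
    if err != nil {
        return err
    }
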
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+ if len(key) != 32 {
+ return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
+ }
+ sse := ssec{}
+ copy(sse[:], key)
+ return sse, nil
+}
+
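A sketch with a freshly generated random key; crypto/rand is assumed for key material, and the caller must retain the key, since with SSE-C the server does not store it:

    key := make([]byte, 32) // NewSSEC rejects anything but exactly 32 bytes
    if _, err := rand.Read(key); err != nil {
        return err
    }
    sse, err := encrypt.NewSSEC(key)
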
+// SSE transforms an SSE-C copy encryption into an SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssecCopy); ok {
+ return ssec(sse)
+ }
+ return sse
+}
+
+// SSECopy transforms an SSE-C encryption into an SSE-C copy
+// encryption. This is required for SSE-C key rotation or an SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssec); ok {
+ return ssecCopy(sse)
+ }
+ return sse
+}
+
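This mirrors the key-rotation and decrypt-by-copy paths exercised in functional_tests.go above: the copy source presents the old key through the copy-source headers, while the destination carries the new key (oldKey, newKey and the names are assumed SSE-C values and placeholders):

    src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(oldKey))
    dst, err := minio.NewDestinationInfo(bucketName, objectName, newKey, nil)
    if err != nil {
        return err
    }
    // Copying the object onto itself re-encrypts it under newKey.
    err = c.CopyObject(dst, src)
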
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(sseCustomerAlgorithm, "AES256")
+ h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(sseCopyCustomerAlgorithm, "AES256")
+ h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
+
+type kms struct {
+ key string
+ context []byte
+ hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+ h.Set(sseGenericHeader, "aws:kms")
+ h.Set(sseKmsKeyID, s.key)
+ if s.hasContext {
+ h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
deleted file mode 100644
index 737b810ac..000000000
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package policy
-
-import "github.com/minio/minio-go/pkg/set"
-
-// ConditionKeyMap - map of policy condition key and value.
-type ConditionKeyMap map[string]set.StringSet
-
-// Add - adds key and value. The value is appended If key already exists.
-func (ckm ConditionKeyMap) Add(key string, value set.StringSet) {
- if v, ok := ckm[key]; ok {
- ckm[key] = v.Union(value)
- } else {
- ckm[key] = set.CopyStringSet(value)
- }
-}
-
-// Remove - removes value of given key. If key has empty after removal, the key is also removed.
-func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) {
- if v, ok := ckm[key]; ok {
- if value != nil {
- ckm[key] = v.Difference(value)
- }
-
- if ckm[key].IsEmpty() {
- delete(ckm, key)
- }
- }
-}
-
-// RemoveKey - removes key and its value.
-func (ckm ConditionKeyMap) RemoveKey(key string) {
- if _, ok := ckm[key]; ok {
- delete(ckm, key)
- }
-}
-
-// CopyConditionKeyMap - returns new copy of given ConditionKeyMap.
-func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap {
- out := make(ConditionKeyMap)
-
- for k, v := range condKeyMap {
- out[k] = set.CopyStringSet(v)
- }
-
- return out
-}
-
-// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap.
-func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap {
- out := CopyConditionKeyMap(condKeyMap1)
-
- for k, v := range condKeyMap2 {
- if ev, ok := out[k]; ok {
- out[k] = ev.Union(v)
- } else {
- out[k] = set.CopyStringSet(v)
- }
- }
-
- return out
-}
-
-// ConditionMap - map of condition and conditional values.
-type ConditionMap map[string]ConditionKeyMap
-
-// Add - adds condition key and condition value. The value is appended if key already exists.
-func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) {
- if v, ok := cond[condKey]; ok {
- cond[condKey] = mergeConditionKeyMap(v, condKeyMap)
- } else {
- cond[condKey] = CopyConditionKeyMap(condKeyMap)
- }
-}
-
-// Remove - removes condition key and its value.
-func (cond ConditionMap) Remove(condKey string) {
- if _, ok := cond[condKey]; ok {
- delete(cond, condKey)
- }
-}
-
-// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap.
-func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap {
- out := make(ConditionMap)
-
- for k, v := range condMap1 {
- out[k] = CopyConditionKeyMap(v)
- }
-
- for k, v := range condMap2 {
- if ev, ok := out[k]; ok {
- out[k] = mergeConditionKeyMap(ev, v)
- } else {
- out[k] = CopyConditionKeyMap(v)
- }
- }
-
- return out
-}
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
deleted file mode 100644
index 9dda99efc..000000000
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
+++ /dev/null
@@ -1,635 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package policy
-
-import (
- "reflect"
- "strings"
-
- "github.com/minio/minio-go/pkg/set"
-)
-
-// BucketPolicy - Bucket level policy.
-type BucketPolicy string
-
-// Different types of Policies currently supported for buckets.
-const (
- BucketPolicyNone BucketPolicy = "none"
- BucketPolicyReadOnly = "readonly"
- BucketPolicyReadWrite = "readwrite"
- BucketPolicyWriteOnly = "writeonly"
-)
-
-// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
-func (p BucketPolicy) IsValidBucketPolicy() bool {
- switch p {
- case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
- return true
- }
- return false
-}
-
-// Resource prefix for all aws resources.
-const awsResourcePrefix = "arn:aws:s3:::"
-
-// Common bucket actions for both read and write policies.
-var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation")
-
-// Read only bucket actions.
-var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket")
-
-// Write only bucket actions.
-var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads")
-
-// Read only object actions.
-var readOnlyObjectActions = set.CreateStringSet("s3:GetObject")
-
-// Write only object actions.
-var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject")
-
-// Read and write object actions.
-var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions)
-
-// All valid bucket and object actions.
-var validActions = commonBucketActions.
- Union(readOnlyBucketActions).
- Union(writeOnlyBucketActions).
- Union(readOnlyObjectActions).
- Union(writeOnlyObjectActions)
-
-var startsWithFunc = func(resource string, resourcePrefix string) bool {
- return strings.HasPrefix(resource, resourcePrefix)
-}
-
-// User - canonical users list.
-type User struct {
- AWS set.StringSet `json:"AWS,omitempty"`
- CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"`
-}
-
-// Statement - minio policy statement
-type Statement struct {
- Actions set.StringSet `json:"Action"`
- Conditions ConditionMap `json:"Condition,omitempty"`
- Effect string
- Principal User `json:"Principal"`
- Resources set.StringSet `json:"Resource"`
- Sid string
-}
-
-// BucketAccessPolicy - minio policy collection
-type BucketAccessPolicy struct {
- Version string // date in YYYY-MM-DD format
- Statements []Statement `json:"Statement"`
-}
-
-// isValidStatement - returns whether given statement is valid to process for given bucket name.
-func isValidStatement(statement Statement, bucketName string) bool {
- if statement.Actions.Intersection(validActions).IsEmpty() {
- return false
- }
-
- if statement.Effect != "Allow" {
- return false
- }
-
- if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") {
- return false
- }
-
- bucketResource := awsResourcePrefix + bucketName
- if statement.Resources.Contains(bucketResource) {
- return true
- }
-
- if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() {
- return false
- }
-
- return true
-}
-
-// Returns new statements with bucket actions for given policy.
-func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
- statements = []Statement{}
- if policy == BucketPolicyNone || bucketName == "" {
- return statements
- }
-
- bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName)
-
- statement := Statement{
- Actions: commonBucketActions,
- Effect: "Allow",
- Principal: User{AWS: set.CreateStringSet("*")},
- Resources: bucketResource,
- Sid: "",
- }
- statements = append(statements, statement)
-
- if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite {
- statement = Statement{
- Actions: readOnlyBucketActions,
- Effect: "Allow",
- Principal: User{AWS: set.CreateStringSet("*")},
- Resources: bucketResource,
- Sid: "",
- }
- if prefix != "" {
- condKeyMap := make(ConditionKeyMap)
- condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix))
- condMap := make(ConditionMap)
- condMap.Add("StringEquals", condKeyMap)
- statement.Conditions = condMap
- }
- statements = append(statements, statement)
- }
-
- if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite {
- statement = Statement{
- Actions: writeOnlyBucketActions,
- Effect: "Allow",
- Principal: User{AWS: set.CreateStringSet("*")},
- Resources: bucketResource,
- Sid: "",
- }
- statements = append(statements, statement)
- }
-
- return statements
-}
-
-// Returns new statements contains object actions for given policy.
-func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
- statements = []Statement{}
- if policy == BucketPolicyNone || bucketName == "" {
- return statements
- }
-
- statement := Statement{
- Effect: "Allow",
- Principal: User{AWS: set.CreateStringSet("*")},
- Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"),
- Sid: "",
- }
-
- if policy == BucketPolicyReadOnly {
- statement.Actions = readOnlyObjectActions
- } else if policy == BucketPolicyWriteOnly {
- statement.Actions = writeOnlyObjectActions
- } else if policy == BucketPolicyReadWrite {
- statement.Actions = readWriteObjectActions
- }
-
- statements = append(statements, statement)
- return statements
-}
-
-// Returns new statements for given policy, bucket and prefix.
-func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
- statements = []Statement{}
- ns := newBucketStatement(policy, bucketName, prefix)
- statements = append(statements, ns...)
-
- ns = newObjectStatement(policy, bucketName, prefix)
- statements = append(statements, ns...)
-
- return statements
-}
-
-// Returns whether given bucket statements are used by other than given prefix statements.
-func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) {
- resourcePrefix := awsResourcePrefix + bucketName + "/"
- objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
-
- for _, s := range statements {
- if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() {
- if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
- readOnlyInUse = true
- }
-
- if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
- writeOnlyInUse = true
- }
- }
- if readOnlyInUse && writeOnlyInUse {
- break
- }
- }
-
- return readOnlyInUse, writeOnlyInUse
-}
-
-// Removes object actions in given statement.
-func removeObjectActions(statement Statement, objectResource string) Statement {
- if statement.Conditions == nil {
- if len(statement.Resources) > 1 {
- statement.Resources.Remove(objectResource)
- } else {
- statement.Actions = statement.Actions.Difference(readOnlyObjectActions)
- statement.Actions = statement.Actions.Difference(writeOnlyObjectActions)
- }
- }
-
- return statement
-}
-
-// Removes bucket actions for given policy in given statement.
-func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement {
- removeReadOnly := func() {
- if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
- return
- }
-
- if statement.Conditions == nil {
- statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
- return
- }
-
- if prefix != "" {
- stringEqualsValue := statement.Conditions["StringEquals"]
- values := set.NewStringSet()
- if stringEqualsValue != nil {
- values = stringEqualsValue["s3:prefix"]
- if values == nil {
- values = set.NewStringSet()
- }
- }
-
- values.Remove(prefix)
-
- if stringEqualsValue != nil {
- if values.IsEmpty() {
- delete(stringEqualsValue, "s3:prefix")
- }
- if len(stringEqualsValue) == 0 {
- delete(statement.Conditions, "StringEquals")
- }
- }
-
- if len(statement.Conditions) == 0 {
- statement.Conditions = nil
- statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
- }
- }
- }
-
- removeWriteOnly := func() {
- if statement.Conditions == nil {
- statement.Actions = statement.Actions.Difference(writeOnlyBucketActions)
- }
- }
-
- if len(statement.Resources) > 1 {
- statement.Resources.Remove(bucketResource)
- } else {
- if !readOnlyInUse {
- removeReadOnly()
- }
-
- if !writeOnlyInUse {
- removeWriteOnly()
- }
- }
-
- return statement
-}
-
-// Returns statements containing removed actions/statements for given
-// policy, bucket name and prefix.
-func removeStatements(statements []Statement, bucketName string, prefix string) []Statement {
- bucketResource := awsResourcePrefix + bucketName
- objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
- readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix)
-
- out := []Statement{}
- readOnlyBucketStatements := []Statement{}
- s3PrefixValues := set.NewStringSet()
-
- for _, statement := range statements {
- if !isValidStatement(statement, bucketName) {
- out = append(out, statement)
- continue
- }
-
- if statement.Resources.Contains(bucketResource) {
- if statement.Conditions != nil {
- statement = removeBucketActions(statement, prefix, bucketResource, false, false)
- } else {
- statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse)
- }
- } else if statement.Resources.Contains(objectResource) {
- statement = removeObjectActions(statement, objectResource)
- }
-
- if !statement.Actions.IsEmpty() {
- if statement.Resources.Contains(bucketResource) &&
- statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) &&
- statement.Effect == "Allow" &&
- statement.Principal.AWS.Contains("*") {
-
- if statement.Conditions != nil {
- stringEqualsValue := statement.Conditions["StringEquals"]
- values := set.NewStringSet()
- if stringEqualsValue != nil {
- values = stringEqualsValue["s3:prefix"]
- if values == nil {
- values = set.NewStringSet()
- }
- }
- s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string {
- return bucketResource + "/" + v + "*"
- }))
- } else if !s3PrefixValues.IsEmpty() {
- readOnlyBucketStatements = append(readOnlyBucketStatements, statement)
- continue
- }
- }
- out = append(out, statement)
- }
- }
-
- skipBucketStatement := true
- resourcePrefix := awsResourcePrefix + bucketName + "/"
- for _, statement := range out {
- if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() &&
- s3PrefixValues.Intersection(statement.Resources).IsEmpty() {
- skipBucketStatement = false
- break
- }
- }
-
- for _, statement := range readOnlyBucketStatements {
- if skipBucketStatement &&
- statement.Resources.Contains(bucketResource) &&
- statement.Effect == "Allow" &&
- statement.Principal.AWS.Contains("*") &&
- statement.Conditions == nil {
- continue
- }
-
- out = append(out, statement)
- }
-
- if len(out) == 1 {
- statement := out[0]
- if statement.Resources.Contains(bucketResource) &&
- statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
- statement.Effect == "Allow" &&
- statement.Principal.AWS.Contains("*") &&
- statement.Conditions == nil {
- out = []Statement{}
- }
- }
-
- return out
-}
-
-// Appends given statement into statement list to have unique statements.
-// - If statement already exists in statement list, it ignores.
-// - If statement exists with different conditions, they are merged.
-// - Else the statement is appended to statement list.
-func appendStatement(statements []Statement, statement Statement) []Statement {
- for i, s := range statements {
- if s.Actions.Equals(statement.Actions) &&
- s.Effect == statement.Effect &&
- s.Principal.AWS.Equals(statement.Principal.AWS) &&
- reflect.DeepEqual(s.Conditions, statement.Conditions) {
- statements[i].Resources = s.Resources.Union(statement.Resources)
- return statements
- } else if s.Resources.Equals(statement.Resources) &&
- s.Effect == statement.Effect &&
- s.Principal.AWS.Equals(statement.Principal.AWS) &&
- reflect.DeepEqual(s.Conditions, statement.Conditions) {
- statements[i].Actions = s.Actions.Union(statement.Actions)
- return statements
- }
-
- if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) &&
- s.Actions.Intersection(statement.Actions).Equals(statement.Actions) &&
- s.Effect == statement.Effect &&
- s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) {
- if reflect.DeepEqual(s.Conditions, statement.Conditions) {
- return statements
- }
- if s.Conditions != nil && statement.Conditions != nil {
- if s.Resources.Equals(statement.Resources) {
- statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions)
- return statements
- }
- }
- }
- }
-
- if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) {
- return append(statements, statement)
- }
-
- return statements
-}
-
-// Appends two statement lists.
-func appendStatements(statements []Statement, appendStatements []Statement) []Statement {
- for _, s := range appendStatements {
- statements = appendStatement(statements, s)
- }
-
- return statements
-}
-
-// Returns policy of given bucket statement.
-func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) {
- if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) {
- return commonFound, readOnly, writeOnly
- }
-
- if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
- statement.Conditions == nil {
- commonFound = true
- }
-
- if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) &&
- statement.Conditions == nil {
- writeOnly = true
- }
-
- if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
- if prefix != "" && statement.Conditions != nil {
- if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok {
- if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok {
- if s3PrefixValues.Contains(prefix) {
- readOnly = true
- }
- }
- } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok {
- if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok {
- if !s3PrefixValues.Contains(prefix) {
- readOnly = true
- }
- }
- }
- } else if prefix == "" && statement.Conditions == nil {
- readOnly = true
- } else if prefix != "" && statement.Conditions == nil {
- readOnly = true
- }
- }
-
- return commonFound, readOnly, writeOnly
-}
-
-// Returns the read-only/write-only policy flags for the given object statement.
-func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
- if statement.Effect == "Allow" &&
- statement.Principal.AWS.Contains("*") &&
- statement.Conditions == nil {
- if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
- readOnly = true
- }
- if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
- writeOnly = true
- }
- }
-
- return readOnly, writeOnly
-}
-
-// GetPolicy - Returns the policy for the given bucket name and prefix found in the given statements.
-func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
- bucketResource := awsResourcePrefix + bucketName
- objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
-
- bucketCommonFound := false
- bucketReadOnly := false
- bucketWriteOnly := false
- matchedResource := ""
- objReadOnly := false
- objWriteOnly := false
-
- for _, s := range statements {
- matchedObjResources := set.NewStringSet()
- if s.Resources.Contains(objectResource) {
- matchedObjResources.Add(objectResource)
- } else {
- matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource)
- }
-
- if !matchedObjResources.IsEmpty() {
- readOnly, writeOnly := getObjectPolicy(s)
- for resource := range matchedObjResources {
- if len(matchedResource) < len(resource) {
- objReadOnly = readOnly
- objWriteOnly = writeOnly
- matchedResource = resource
- } else if len(matchedResource) == len(resource) {
- objReadOnly = objReadOnly || readOnly
- objWriteOnly = objWriteOnly || writeOnly
- matchedResource = resource
- }
- }
- } else if s.Resources.Contains(bucketResource) {
- commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix)
- bucketCommonFound = bucketCommonFound || commonFound
- bucketReadOnly = bucketReadOnly || readOnly
- bucketWriteOnly = bucketWriteOnly || writeOnly
- }
- }
-
- policy := BucketPolicyNone
- if bucketCommonFound {
- if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly {
- policy = BucketPolicyReadWrite
- } else if bucketReadOnly && objReadOnly {
- policy = BucketPolicyReadOnly
- } else if bucketWriteOnly && objWriteOnly {
- policy = BucketPolicyWriteOnly
- }
- }
-
- return policy
-}
-
-// GetPolicies - returns a map of policy rules for the given bucket name found in the given statements.
-func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
- policyRules := map[string]BucketPolicy{}
- objResources := set.NewStringSet()
-	// Collect all resources related to object policies.
- for _, s := range statements {
- for r := range s.Resources {
- if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
- objResources.Add(r)
- }
- }
- }
-	// Treat each policy resource as an actual object and fetch its policy.
- for r := range objResources {
-		// Strip a trailing '*', remembering it in asterisk.
- asterisk := ""
- if strings.HasSuffix(r, "*") {
- r = r[:len(r)-1]
- asterisk = "*"
- }
- objectPath := r[len(awsResourcePrefix+bucketName)+1:]
- p := GetPolicy(statements, bucketName, objectPath)
- policyRules[bucketName+"/"+objectPath+asterisk] = p
- }
- return policyRules
-}
-
-// SetPolicy - Returns new statements with the policy for the given bucket name and prefix applied.
-func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
- out := removeStatements(statements, bucketName, prefix)
-	ns := newStatements(policy, bucketName, prefix)
-	return appendStatements(out, ns)
-}
-
-// resourceMatch matches wildcards in 'pattern' against the given resource.
-func resourceMatch(pattern, resource string) bool {
- if pattern == "" {
- return resource == pattern
- }
- if pattern == "*" {
- return true
- }
- parts := strings.Split(pattern, "*")
- if len(parts) == 1 {
- return resource == pattern
- }
- tGlob := strings.HasSuffix(pattern, "*")
- end := len(parts) - 1
- if !strings.HasPrefix(resource, parts[0]) {
- return false
- }
- for i := 1; i < end; i++ {
- if !strings.Contains(resource, parts[i]) {
- return false
- }
- idx := strings.Index(resource, parts[i]) + len(parts[i])
- resource = resource[idx:]
- }
- return tGlob || strings.HasSuffix(resource, parts[end])
-}
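For context on the helpers deleted above, a minimal sketch of how they composed (identifiers are those of the removed pkg/policy file; the bucket and prefix values are illustrative, not part of this diff):

	// Grant anonymous read access to mybucket/public/*.
	statements := SetPolicy([]Statement{}, BucketPolicyReadOnly, "mybucket", "public/")

	// Read the effective policy back for the same bucket/prefix.
	p := GetPolicy(statements, "mybucket", "public/") // BucketPolicyReadOnly

	// Enumerate per-prefix rules, e.g. {"mybucket/public/*": "readonly"}.
	rules := GetPolicies(statements, "mybucket")
	_, _ = p, rules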
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
index 0b90c41f6..b4070938e 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -25,7 +25,6 @@ import (
"fmt"
"net/http"
"net/url"
- "path/filepath"
"sort"
"strconv"
"strings"
@@ -40,21 +39,17 @@ const (
)
// Encode input URL path to URL encoded path.
-func encodeURL2Path(req *http.Request) (path string) {
- reqHost := getHostAddr(req)
- // Encode URL path.
- if isS3, _ := filepath.Match("*.s3*.amazonaws.com", reqHost); isS3 {
- bucketName := reqHost[:strings.LastIndex(reqHost, ".s3")]
- path = "/" + bucketName
- path += req.URL.Path
- path = s3utils.EncodePath(path)
- return
- }
- if strings.HasSuffix(reqHost, ".storage.googleapis.com") {
- path = "/" + strings.TrimSuffix(reqHost, ".storage.googleapis.com")
- path += req.URL.Path
- path = s3utils.EncodePath(path)
- return
+func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
+ if virtualHost {
+ reqHost := getHostAddr(req)
+ dotPos := strings.Index(reqHost, ".")
+ if dotPos > -1 {
+ bucketName := reqHost[:dotPos]
+ path = "/" + bucketName
+ path += req.URL.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
}
path = s3utils.EncodePath(req.URL.Path)
return
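Where the old code special-cased *.s3*.amazonaws.com and *.storage.googleapis.com hosts, the new virtualHost parameter takes the bucket from everything before the first dot of any host. A sketch of the resulting behavior (encodeURL2Path is unexported, so this is illustrative only):

	req, _ := http.NewRequest("GET", "https://mybucket.s3.amazonaws.com/photos/cat.png", nil)
	encodeURL2Path(req, true)  // "/mybucket/photos/cat.png" - bucket taken from the host
	encodeURL2Path(req, false) // "/photos/cat.png"          - path-style request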
@@ -62,7 +57,7 @@ func encodeURL2Path(req *http.Request) (path string) {
// PreSignV2 - presign the request in the following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -78,7 +73,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
}
// Get presigned string to sign.
- stringToSign := preStringToSignV2(req)
+ stringToSign := preStringToSignV2(req, virtualHost)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -132,7 +127,7 @@ func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
// CanonicalizedProtocolHeaders = <described below>
// SignV2 signs the request before Do() (AWS Signature Version 2).
-func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -147,7 +142,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
}
// Calculate HMAC for secretAccessKey.
- stringToSign := stringToSignV2(req)
+ stringToSign := stringToSignV2(req, virtualHost)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -172,14 +167,14 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// Expires + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func preStringToSignV2(req http.Request) string {
+func preStringToSignV2(req http.Request, virtualHost bool) string {
buf := new(bytes.Buffer)
// Write standard headers.
writePreSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- writeCanonicalizedResource(buf, req)
+ writeCanonicalizedResource(buf, req, virtualHost)
return buf.String()
}
@@ -199,14 +194,14 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func stringToSignV2(req http.Request) string {
+func stringToSignV2(req http.Request, virtualHost bool) string {
buf := new(bytes.Buffer)
// Write standard headers.
writeSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- writeCanonicalizedResource(buf, req)
+ writeCanonicalizedResource(buf, req, virtualHost)
return buf.String()
}
@@ -288,11 +283,11 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
// Save request URL.
requestURL := req.URL
// Get encoded URL path.
- buf.WriteString(encodeURL2Path(&req))
+ buf.WriteString(encodeURL2Path(&req, virtualHost))
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
index c21a76d79..49d6dcdf5 100644
--- a/vendor/github.com/minio/minio-go/retry.go
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -111,6 +111,9 @@ func isNetErrorRetryable(err error) bool {
} else if strings.Contains(err.Error(), "connection timed out") {
// If err is a net.Dial timeout, retry.
return true
+ } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
+ // If error is transport connection broken, retry.
+ return true
}
}
}
diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go
index e2dafe172..88700cfe7 100644
--- a/vendor/github.com/minio/minio-go/transport.go
+++ b/vendor/github.com/minio/minio-go/transport.go
@@ -2,7 +2,7 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
+ * Copyright 2017-2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,9 +25,10 @@ import (
"time"
)
-// This default transport is similar to http.DefaultTransport
-// but with additional DisableCompression:
-var defaultMinioTransport http.RoundTripper = &http.Transport{
+// DefaultTransport - this default transport is similar to
+// http.DefaultTransport but with the additional param DisableCompression
+// set to true, to avoid transparently decompressing 'gzip'-encoded content.
+var DefaultTransport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
@@ -35,6 +36,7 @@ var defaultMinioTransport http.RoundTripper = &http.Transport{
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
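Since the transport is now exported as DefaultTransport, callers can reuse or wrap it. A sketch assuming the minio-go v6 client API (minio.New and Client.SetCustomTransport; endpoint and credentials are placeholders):

	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Substitute a RoundTripper based on the exported default,
	// e.g. one with a customized TLS config.
	client.SetCustomTransport(minio.DefaultTransport)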
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 0f92546d3..2f02ac89f 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -221,16 +221,10 @@ var supportedHeaders = []string{
"cache-control",
"content-encoding",
"content-disposition",
+ "content-language",
// Add more supported headers here.
}
-// cseHeaders is a list of client-side encryption headers
-var cseHeaders = []string{
- "X-Amz-Iv",
- "X-Amz-Key",
- "X-Amz-Matdesc",
-}
-
// isStorageClassHeader returns true if the header is a supported storage class header
func isStorageClassHeader(headerKey string) bool {
return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey)
@@ -247,19 +241,6 @@ func isStandardHeader(headerKey string) bool {
return false
}
-// isCSEHeader returns true if header is a client side encryption header.
-func isCSEHeader(headerKey string) bool {
- key := strings.ToLower(headerKey)
- for _, h := range cseHeaders {
- header := strings.ToLower(h)
- if (header == key) ||
- (("x-amz-meta-" + header) == key) {
- return true
- }
- }
- return false
-}
-
// sseHeaders is a list of server-side encryption headers
var sseHeaders = []string{
"x-amz-server-side-encryption",
diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go
new file mode 100644
index 000000000..b423feaea
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/argon2.go
@@ -0,0 +1,285 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package argon2 implements the key derivation function Argon2.
+// Argon2 was selected as the winner of the Password Hashing Competition and can
+// be used to derive cryptographic keys from passwords.
+//
+// For a detailed specification of Argon2 see [1].
+//
+// If you aren't sure which function you need, use Argon2id (IDKey) and
+// the parameter recommendations for your scenario.
+//
+//
+// Argon2i
+//
+// Argon2i (implemented by Key) is the side-channel resistant version of Argon2.
+// It uses data-independent memory access, which is preferred for password
+// hashing and password-based key derivation. Argon2i requires more passes over
+// memory than Argon2id to protect from trade-off attacks. The recommended
+// parameters (taken from [2]) for non-interactive operations are time=3 and
+// the maximum available memory.
+//
+//
+// Argon2id
+//
+// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining
+// Argon2i and Argon2d. It uses data-independent memory access for the first
+// half of the first iteration over the memory and data-dependent memory access
+// for the rest. Argon2id is side-channel resistant and provides better brute-
+// force cost savings due to time-memory tradeoffs than Argon2i. The recommended
+// parameters for non-interactive operations (taken from [2]) are time=1 and
+// the maximum available memory.
+//
+// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
+// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
+package argon2
+
+import (
+ "encoding/binary"
+ "sync"
+
+ "golang.org/x/crypto/blake2b"
+)
+
+// The Argon2 version implemented by this package.
+const Version = 0x13
+
+const (
+ argon2d = iota
+ argon2i
+ argon2id
+)
+
+// Key derives a key from the password, salt, and cost parameters using Argon2i,
+// returning a byte slice of length keyLen that can be used as a cryptographic
+// key. The CPU cost and parallelism degree must be greater than zero.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
+//
+// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
+// If using that amount of memory (32 MB) is not possible in some contexts then
+// the time parameter can be increased to compensate.
+//
+// The time parameter specifies the number of passes over the memory and the
+// memory parameter specifies the size of the memory in KiB. For example
+// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
+// adjusted to the number of available CPUs. The cost parameters should be
+// increased as memory latency and CPU parallelism increase. Remember to get a
+// good random salt.
+func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
+ return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
+}
+
+// IDKey derives a key from the password, salt, and cost parameters using
+// Argon2id, returning a byte slice of length keyLen that can be used as a
+// cryptographic key. The CPU cost and parallelism degree must be greater than
+// zero.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
+//
+// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
+// If using that amount of memory (64 MB) is not possible in some contexts then
+// the time parameter can be increased to compensate.
+//
+// The time parameter specifies the number of passes over the memory and the
+// memory parameter specifies the size of the memory in KiB. For example
+// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
+// adjusted to the number of available CPUs. The cost parameters should be
+// increased as memory latency and CPU parallelism increase. Remember to get a
+// good random salt.
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
+ return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen)
+}
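A self-contained usage sketch for the exported API, following the parameter recommendations in the comments above (the 16-byte salt size is a common choice, not something this package mandates):

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/argon2"
	)

	func main() {
		salt := make([]byte, 16)
		if _, err := rand.Read(salt); err != nil {
			panic(err)
		}
		// Argon2id with time=1, memory=64 MiB, threads=4, for a 32-byte key.
		key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
		fmt.Printf("%x\n", key)
	}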
+
+func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
+ if time < 1 {
+ panic("argon2: number of rounds too small")
+ }
+ if threads < 1 {
+ panic("argon2: parallelism degree too low")
+ }
+ h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode)
+
+ memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads))
+ if memory < 2*syncPoints*uint32(threads) {
+ memory = 2 * syncPoints * uint32(threads)
+ }
+ B := initBlocks(&h0, memory, uint32(threads))
+ processBlocks(B, time, memory, uint32(threads), mode)
+ return extractKey(B, memory, uint32(threads), keyLen)
+}
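The memory adjustment in deriveKey rounds the block count down to a multiple of syncPoints*threads and enforces a floor of 2*syncPoints*threads blocks. A quick worked check of that arithmetic (a standalone mirror of the lines above, for illustration):

	adjust := func(memory, threads uint32) uint32 {
		const syncPoints = 4
		memory = memory / (syncPoints * threads) * (syncPoints * threads)
		if memory < 2*syncPoints*threads {
			memory = 2 * syncPoints * threads
		}
		return memory
	}
	// adjust(100, 4) == 96 (rounded down to a multiple of 16)
	// adjust(20, 4)  == 32 (raised to the floor of 2*4*4 blocks)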
+
+const (
+ blockLength = 128
+ syncPoints = 4
+)
+
+type block [blockLength]uint64
+
+func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte {
+ var (
+ h0 [blake2b.Size + 8]byte
+ params [24]byte
+ tmp [4]byte
+ )
+
+ b2, _ := blake2b.New512(nil)
+ binary.LittleEndian.PutUint32(params[0:4], threads)
+ binary.LittleEndian.PutUint32(params[4:8], keyLen)
+ binary.LittleEndian.PutUint32(params[8:12], memory)
+ binary.LittleEndian.PutUint32(params[12:16], time)
+ binary.LittleEndian.PutUint32(params[16:20], uint32(Version))
+ binary.LittleEndian.PutUint32(params[20:24], uint32(mode))
+ b2.Write(params[:])
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(password)))
+ b2.Write(tmp[:])
+ b2.Write(password)
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt)))
+ b2.Write(tmp[:])
+ b2.Write(salt)
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(key)))
+ b2.Write(tmp[:])
+ b2.Write(key)
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(data)))
+ b2.Write(tmp[:])
+ b2.Write(data)
+ b2.Sum(h0[:0])
+ return h0
+}
+
+func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block {
+ var block0 [1024]byte
+ B := make([]block, memory)
+ for lane := uint32(0); lane < threads; lane++ {
+ j := lane * (memory / threads)
+ binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane)
+
+ binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0)
+ blake2bHash(block0[:], h0[:])
+ for i := range B[j+0] {
+ B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:])
+ }
+
+ binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1)
+ blake2bHash(block0[:], h0[:])
+ for i := range B[j+1] {
+ B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:])
+ }
+ }
+ return B
+}
+
+func processBlocks(B []block, time, memory, threads uint32, mode int) {
+ lanes := memory / threads
+ segments := lanes / syncPoints
+
+ processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) {
+ var addresses, in, zero block
+ if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
+ in[0] = uint64(n)
+ in[1] = uint64(lane)
+ in[2] = uint64(slice)
+ in[3] = uint64(memory)
+ in[4] = uint64(time)
+ in[5] = uint64(mode)
+ }
+
+ index := uint32(0)
+ if n == 0 && slice == 0 {
+ index = 2 // we have already generated the first two blocks
+ if mode == argon2i || mode == argon2id {
+ in[6]++
+ processBlock(&addresses, &in, &zero)
+ processBlock(&addresses, &addresses, &zero)
+ }
+ }
+
+ offset := lane*lanes + slice*segments + index
+ var random uint64
+ for index < segments {
+ prev := offset - 1
+ if index == 0 && slice == 0 {
+ prev += lanes // last block in lane
+ }
+ if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
+ if index%blockLength == 0 {
+ in[6]++
+ processBlock(&addresses, &in, &zero)
+ processBlock(&addresses, &addresses, &zero)
+ }
+ random = addresses[index%blockLength]
+ } else {
+ random = B[prev][0]
+ }
+ newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index)
+ processBlockXOR(&B[offset], &B[prev], &B[newOffset])
+ index, offset = index+1, offset+1
+ }
+ wg.Done()
+ }
+
+ for n := uint32(0); n < time; n++ {
+ for slice := uint32(0); slice < syncPoints; slice++ {
+ var wg sync.WaitGroup
+ for lane := uint32(0); lane < threads; lane++ {
+ wg.Add(1)
+ go processSegment(n, slice, lane, &wg)
+ }
+ wg.Wait()
+ }
+ }
+
+}
+
+func extractKey(B []block, memory, threads, keyLen uint32) []byte {
+ lanes := memory / threads
+ for lane := uint32(0); lane < threads-1; lane++ {
+ for i, v := range B[(lane*lanes)+lanes-1] {
+ B[memory-1][i] ^= v
+ }
+ }
+
+ var block [1024]byte
+ for i, v := range B[memory-1] {
+ binary.LittleEndian.PutUint64(block[i*8:], v)
+ }
+ key := make([]byte, keyLen)
+ blake2bHash(key, block[:])
+ return key
+}
+
+func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 {
+ refLane := uint32(rand>>32) % threads
+ if n == 0 && slice == 0 {
+ refLane = lane
+ }
+ m, s := 3*segments, ((slice+1)%syncPoints)*segments
+ if lane == refLane {
+ m += index
+ }
+ if n == 0 {
+ m, s = slice*segments, 0
+ if slice == 0 || lane == refLane {
+ m += index
+ }
+ }
+ if index == 0 || lane == refLane {
+ m--
+ }
+ return phi(rand, uint64(m), uint64(s), refLane, lanes)
+}
+
+func phi(rand, m, s uint64, lane, lanes uint32) uint32 {
+ p := rand & 0xFFFFFFFF
+ p = (p * p) >> 32
+ p = (p * m) >> 32
+ return lane*lanes + uint32((s+m-(p+1))%uint64(lanes))
+}
diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go
new file mode 100644
index 000000000..10f46948d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blake2b.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package argon2
+
+import (
+ "encoding/binary"
+ "hash"
+
+ "golang.org/x/crypto/blake2b"
+)
+
+// blake2bHash computes an arbitrarily long hash value of in
+// and writes the hash to out.
+func blake2bHash(out []byte, in []byte) {
+ var b2 hash.Hash
+ if n := len(out); n < blake2b.Size {
+ b2, _ = blake2b.New(n, nil)
+ } else {
+ b2, _ = blake2b.New512(nil)
+ }
+
+ var buffer [blake2b.Size]byte
+ binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out)))
+ b2.Write(buffer[:4])
+ b2.Write(in)
+
+ if len(out) <= blake2b.Size {
+ b2.Sum(out[:0])
+ return
+ }
+
+ outLen := len(out)
+ b2.Sum(buffer[:0])
+ b2.Reset()
+ copy(out, buffer[:32])
+ out = out[32:]
+ for len(out) > blake2b.Size {
+ b2.Write(buffer[:])
+ b2.Sum(buffer[:0])
+ copy(out, buffer[:32])
+ out = out[32:]
+ b2.Reset()
+ }
+
+ if outLen%blake2b.Size > 0 { // outLen > 64
+	r := ((outLen + 31) / 32) - 2 // ⌈τ/32⌉ - 2
+ b2, _ = blake2b.New(outLen-32*r, nil)
+ }
+ b2.Write(buffer[:])
+ b2.Sum(out[:0])
+}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
new file mode 100644
index 000000000..bb2b0d8b4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
@@ -0,0 +1,61 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package argon2
+
+func init() {
+ useSSE4 = supportsSSE4()
+}
+
+//go:noescape
+func supportsSSE4() bool
+
+//go:noescape
+func mixBlocksSSE2(out, a, b, c *block)
+
+//go:noescape
+func xorBlocksSSE2(out, a, b, c *block)
+
+//go:noescape
+func blamkaSSE4(b *block)
+
+func processBlockSSE(out, in1, in2 *block, xor bool) {
+ var t block
+ mixBlocksSSE2(&t, in1, in2, &t)
+ if useSSE4 {
+ blamkaSSE4(&t)
+ } else {
+ for i := 0; i < blockLength; i += 16 {
+ blamkaGeneric(
+ &t[i+0], &t[i+1], &t[i+2], &t[i+3],
+ &t[i+4], &t[i+5], &t[i+6], &t[i+7],
+ &t[i+8], &t[i+9], &t[i+10], &t[i+11],
+ &t[i+12], &t[i+13], &t[i+14], &t[i+15],
+ )
+ }
+ for i := 0; i < blockLength/8; i += 2 {
+ blamkaGeneric(
+ &t[i], &t[i+1], &t[16+i], &t[16+i+1],
+ &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
+ &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
+ &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
+ )
+ }
+ }
+ if xor {
+ xorBlocksSSE2(out, in1, in2, &t)
+ } else {
+ mixBlocksSSE2(out, in1, in2, &t)
+ }
+}
+
+func processBlock(out, in1, in2 *block) {
+ processBlockSSE(out, in1, in2, false)
+}
+
+func processBlockXOR(out, in1, in2 *block) {
+ processBlockSSE(out, in1, in2, true)
+}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
new file mode 100644
index 000000000..8a83f7c73
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
@@ -0,0 +1,252 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
+DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
+
+DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
+DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
+
+#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
+ MOVO v4, t1; \
+ MOVO v5, v4; \
+ MOVO t1, v5; \
+ MOVO v6, t1; \
+ PUNPCKLQDQ v6, t2; \
+ PUNPCKHQDQ v7, v6; \
+ PUNPCKHQDQ t2, v6; \
+ PUNPCKLQDQ v7, t2; \
+ MOVO t1, v7; \
+ MOVO v2, t1; \
+ PUNPCKHQDQ t2, v7; \
+ PUNPCKLQDQ v3, t2; \
+ PUNPCKHQDQ t2, v2; \
+ PUNPCKLQDQ t1, t2; \
+ PUNPCKHQDQ t2, v3
+
+#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
+ MOVO v4, t1; \
+ MOVO v5, v4; \
+ MOVO t1, v5; \
+ MOVO v2, t1; \
+ PUNPCKLQDQ v2, t2; \
+ PUNPCKHQDQ v3, v2; \
+ PUNPCKHQDQ t2, v2; \
+ PUNPCKLQDQ v3, t2; \
+ MOVO t1, v3; \
+ MOVO v6, t1; \
+ PUNPCKHQDQ t2, v3; \
+ PUNPCKLQDQ v7, t2; \
+ PUNPCKHQDQ t2, v6; \
+ PUNPCKLQDQ t1, t2; \
+ PUNPCKHQDQ t2, v7
+
+#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
+ MOVO v0, t0; \
+ PMULULQ v2, t0; \
+ PADDQ v2, v0; \
+ PADDQ t0, v0; \
+ PADDQ t0, v0; \
+ PXOR v0, v6; \
+ PSHUFD $0xB1, v6, v6; \
+ MOVO v4, t0; \
+ PMULULQ v6, t0; \
+ PADDQ v6, v4; \
+ PADDQ t0, v4; \
+ PADDQ t0, v4; \
+ PXOR v4, v2; \
+ PSHUFB c40, v2; \
+ MOVO v0, t0; \
+ PMULULQ v2, t0; \
+ PADDQ v2, v0; \
+ PADDQ t0, v0; \
+ PADDQ t0, v0; \
+ PXOR v0, v6; \
+ PSHUFB c48, v6; \
+ MOVO v4, t0; \
+ PMULULQ v6, t0; \
+ PADDQ v6, v4; \
+ PADDQ t0, v4; \
+ PADDQ t0, v4; \
+ PXOR v4, v2; \
+ MOVO v2, t0; \
+ PADDQ v2, t0; \
+ PSRLQ $63, v2; \
+ PXOR t0, v2; \
+ MOVO v1, t0; \
+ PMULULQ v3, t0; \
+ PADDQ v3, v1; \
+ PADDQ t0, v1; \
+ PADDQ t0, v1; \
+ PXOR v1, v7; \
+ PSHUFD $0xB1, v7, v7; \
+ MOVO v5, t0; \
+ PMULULQ v7, t0; \
+ PADDQ v7, v5; \
+ PADDQ t0, v5; \
+ PADDQ t0, v5; \
+ PXOR v5, v3; \
+ PSHUFB c40, v3; \
+ MOVO v1, t0; \
+ PMULULQ v3, t0; \
+ PADDQ v3, v1; \
+ PADDQ t0, v1; \
+ PADDQ t0, v1; \
+ PXOR v1, v7; \
+ PSHUFB c48, v7; \
+ MOVO v5, t0; \
+ PMULULQ v7, t0; \
+ PADDQ v7, v5; \
+ PADDQ t0, v5; \
+ PADDQ t0, v5; \
+ PXOR v5, v3; \
+ MOVO v3, t0; \
+ PADDQ v3, t0; \
+ PSRLQ $63, v3; \
+ PXOR t0, v3
+
+#define LOAD_MSG_0(block, off) \
+ MOVOU 8*(off+0)(block), X0; \
+ MOVOU 8*(off+2)(block), X1; \
+ MOVOU 8*(off+4)(block), X2; \
+ MOVOU 8*(off+6)(block), X3; \
+ MOVOU 8*(off+8)(block), X4; \
+ MOVOU 8*(off+10)(block), X5; \
+ MOVOU 8*(off+12)(block), X6; \
+ MOVOU 8*(off+14)(block), X7
+
+#define STORE_MSG_0(block, off) \
+ MOVOU X0, 8*(off+0)(block); \
+ MOVOU X1, 8*(off+2)(block); \
+ MOVOU X2, 8*(off+4)(block); \
+ MOVOU X3, 8*(off+6)(block); \
+ MOVOU X4, 8*(off+8)(block); \
+ MOVOU X5, 8*(off+10)(block); \
+ MOVOU X6, 8*(off+12)(block); \
+ MOVOU X7, 8*(off+14)(block)
+
+#define LOAD_MSG_1(block, off) \
+ MOVOU 8*off+0*8(block), X0; \
+ MOVOU 8*off+16*8(block), X1; \
+ MOVOU 8*off+32*8(block), X2; \
+ MOVOU 8*off+48*8(block), X3; \
+ MOVOU 8*off+64*8(block), X4; \
+ MOVOU 8*off+80*8(block), X5; \
+ MOVOU 8*off+96*8(block), X6; \
+ MOVOU 8*off+112*8(block), X7
+
+#define STORE_MSG_1(block, off) \
+ MOVOU X0, 8*off+0*8(block); \
+ MOVOU X1, 8*off+16*8(block); \
+ MOVOU X2, 8*off+32*8(block); \
+ MOVOU X3, 8*off+48*8(block); \
+ MOVOU X4, 8*off+64*8(block); \
+ MOVOU X5, 8*off+80*8(block); \
+ MOVOU X6, 8*off+96*8(block); \
+ MOVOU X7, 8*off+112*8(block)
+
+#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
+ LOAD_MSG_0(block, off); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
+ STORE_MSG_0(block, off)
+
+#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
+ LOAD_MSG_1(block, off); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
+ STORE_MSG_1(block, off)
+
+// func blamkaSSE4(b *block)
+TEXT ·blamkaSSE4(SB), 4, $0-8
+ MOVQ b+0(FP), AX
+
+ MOVOU ·c40<>(SB), X10
+ MOVOU ·c48<>(SB), X11
+
+ BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
+
+ BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
+ RET
+
+// func mixBlocksSSE2(out, a, b, c *block)
+TEXT ·mixBlocksSSE2(SB), 4, $0-32
+ MOVQ out+0(FP), DX
+ MOVQ a+8(FP), AX
+ MOVQ b+16(FP), BX
+	MOVQ c+24(FP), CX
+ MOVQ $128, BP
+
+loop:
+ MOVOU 0(AX), X0
+ MOVOU 0(BX), X1
+ MOVOU 0(CX), X2
+ PXOR X1, X0
+ PXOR X2, X0
+ MOVOU X0, 0(DX)
+ ADDQ $16, AX
+ ADDQ $16, BX
+ ADDQ $16, CX
+ ADDQ $16, DX
+ SUBQ $2, BP
+ JA loop
+ RET
+
+// func xorBlocksSSE2(out, a, b, c *block)
+TEXT ·xorBlocksSSE2(SB), 4, $0-32
+ MOVQ out+0(FP), DX
+ MOVQ a+8(FP), AX
+ MOVQ b+16(FP), BX
+	MOVQ c+24(FP), CX
+ MOVQ $128, BP
+
+loop:
+ MOVOU 0(AX), X0
+ MOVOU 0(BX), X1
+ MOVOU 0(CX), X2
+ MOVOU 0(DX), X3
+ PXOR X1, X0
+ PXOR X2, X0
+ PXOR X3, X0
+ MOVOU X0, 0(DX)
+ ADDQ $16, AX
+ ADDQ $16, BX
+ ADDQ $16, CX
+ ADDQ $16, DX
+ SUBQ $2, BP
+ JA loop
+ RET
+
+// func supportsSSE4() bool
+TEXT ·supportsSSE4(SB), 4, $0-1
+ MOVL $1, AX
+ CPUID
+ SHRL $19, CX // Bit 19 indicates SSE4 support
+	ANDL $1, CX // CX != 0 if SSE4 is supported
+ MOVB CX, ret+0(FP)
+ RET
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go
new file mode 100644
index 000000000..a481b2243
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package argon2
+
+var useSSE4 bool
+
+func processBlockGeneric(out, in1, in2 *block, xor bool) {
+ var t block
+ for i := range t {
+ t[i] = in1[i] ^ in2[i]
+ }
+ for i := 0; i < blockLength; i += 16 {
+ blamkaGeneric(
+ &t[i+0], &t[i+1], &t[i+2], &t[i+3],
+ &t[i+4], &t[i+5], &t[i+6], &t[i+7],
+ &t[i+8], &t[i+9], &t[i+10], &t[i+11],
+ &t[i+12], &t[i+13], &t[i+14], &t[i+15],
+ )
+ }
+ for i := 0; i < blockLength/8; i += 2 {
+ blamkaGeneric(
+ &t[i], &t[i+1], &t[16+i], &t[16+i+1],
+ &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
+ &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
+ &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
+ )
+ }
+ if xor {
+ for i := range t {
+ out[i] ^= in1[i] ^ in2[i] ^ t[i]
+ }
+ } else {
+ for i := range t {
+ out[i] = in1[i] ^ in2[i] ^ t[i]
+ }
+ }
+}
+
+func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) {
+ v00, v01, v02, v03 := *t00, *t01, *t02, *t03
+ v04, v05, v06, v07 := *t04, *t05, *t06, *t07
+ v08, v09, v10, v11 := *t08, *t09, *t10, *t11
+ v12, v13, v14, v15 := *t12, *t13, *t14, *t15
+
+ v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
+ v12 ^= v00
+ v12 = v12>>32 | v12<<32
+ v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
+ v04 ^= v08
+ v04 = v04>>24 | v04<<40
+
+ v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
+ v12 ^= v00
+ v12 = v12>>16 | v12<<48
+ v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
+ v04 ^= v08
+ v04 = v04>>63 | v04<<1
+
+ v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
+ v13 ^= v01
+ v13 = v13>>32 | v13<<32
+ v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
+ v05 ^= v09
+ v05 = v05>>24 | v05<<40
+
+ v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
+ v13 ^= v01
+ v13 = v13>>16 | v13<<48
+ v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
+ v05 ^= v09
+ v05 = v05>>63 | v05<<1
+
+ v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
+ v14 ^= v02
+ v14 = v14>>32 | v14<<32
+ v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
+ v06 ^= v10
+ v06 = v06>>24 | v06<<40
+
+ v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
+ v14 ^= v02
+ v14 = v14>>16 | v14<<48
+ v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
+ v06 ^= v10
+ v06 = v06>>63 | v06<<1
+
+ v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
+ v15 ^= v03
+ v15 = v15>>32 | v15<<32
+ v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
+ v07 ^= v11
+ v07 = v07>>24 | v07<<40
+
+ v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
+ v15 ^= v03
+ v15 = v15>>16 | v15<<48
+ v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
+ v07 ^= v11
+ v07 = v07>>63 | v07<<1
+
+ v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
+ v15 ^= v00
+ v15 = v15>>32 | v15<<32
+ v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
+ v05 ^= v10
+ v05 = v05>>24 | v05<<40
+
+ v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
+ v15 ^= v00
+ v15 = v15>>16 | v15<<48
+ v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
+ v05 ^= v10
+ v05 = v05>>63 | v05<<1
+
+ v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
+ v12 ^= v01
+ v12 = v12>>32 | v12<<32
+ v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
+ v06 ^= v11
+ v06 = v06>>24 | v06<<40
+
+ v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
+ v12 ^= v01
+ v12 = v12>>16 | v12<<48
+ v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
+ v06 ^= v11
+ v06 = v06>>63 | v06<<1
+
+ v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
+ v13 ^= v02
+ v13 = v13>>32 | v13<<32
+ v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
+ v07 ^= v08
+ v07 = v07>>24 | v07<<40
+
+ v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
+ v13 ^= v02
+ v13 = v13>>16 | v13<<48
+ v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
+ v07 ^= v08
+ v07 = v07>>63 | v07<<1
+
+ v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
+ v14 ^= v03
+ v14 = v14>>32 | v14<<32
+ v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
+ v04 ^= v09
+ v04 = v04>>24 | v04<<40
+
+ v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
+ v14 ^= v03
+ v14 = v14>>16 | v14<<48
+ v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
+ v04 ^= v09
+ v04 = v04>>63 | v04<<1
+
+ *t00, *t01, *t02, *t03 = v00, v01, v02, v03
+ *t04, *t05, *t06, *t07 = v04, v05, v06, v07
+ *t08, *t09, *t10, *t11 = v08, v09, v10, v11
+ *t12, *t13, *t14, *t15 = v12, v13, v14, v15
+}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go
new file mode 100644
index 000000000..baf7b551d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine gccgo
+
+package argon2
+
+func processBlock(out, in1, in2 *block) {
+ processBlockGeneric(out, in1, in2, false)
+}
+
+func processBlockXOR(out, in1, in2 *block) {
+ processBlockGeneric(out, in1, in2, true)
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go
new file mode 100644
index 000000000..58ea87536
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go
@@ -0,0 +1,289 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
+// and the extendable output function (XOF) BLAKE2Xb.
+//
+// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
+// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
+//
+// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
+// If you need a secret-key MAC (message authentication code), use the New512
+// function with a non-nil key.
+//
+// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
+// can produce hash values between 0 and 4 GiB.
+package blake2b
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ // The blocksize of BLAKE2b in bytes.
+ BlockSize = 128
+ // The hash size of BLAKE2b-512 in bytes.
+ Size = 64
+ // The hash size of BLAKE2b-384 in bytes.
+ Size384 = 48
+ // The hash size of BLAKE2b-256 in bytes.
+ Size256 = 32
+)
+
+var (
+ useAVX2 bool
+ useAVX bool
+ useSSE4 bool
+)
+
+var (
+ errKeySize = errors.New("blake2b: invalid key size")
+ errHashSize = errors.New("blake2b: invalid hash size")
+)
+
+var iv = [8]uint64{
+ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
+ 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
+}
+
+// Sum512 returns the BLAKE2b-512 checksum of the data.
+func Sum512(data []byte) [Size]byte {
+ var sum [Size]byte
+ checkSum(&sum, Size, data)
+ return sum
+}
+
+// Sum384 returns the BLAKE2b-384 checksum of the data.
+func Sum384(data []byte) [Size384]byte {
+ var sum [Size]byte
+ var sum384 [Size384]byte
+ checkSum(&sum, Size384, data)
+ copy(sum384[:], sum[:Size384])
+ return sum384
+}
+
+// Sum256 returns the BLAKE2b-256 checksum of the data.
+func Sum256(data []byte) [Size256]byte {
+ var sum [Size]byte
+ var sum256 [Size256]byte
+ checkSum(&sum, Size256, data)
+ copy(sum256[:], sum[:Size256])
+ return sum256
+}
+
+// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
+// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
+func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
+
+// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
+// key turns the hash into a MAC. The key must between zero and 64 bytes long.
+func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
+
+// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
+// key turns the hash into a MAC. The key must between zero and 64 bytes long.
+func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
+
+// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
+// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long.
+// The hash size can be a value between 1 and 64, but it is highly recommended to use
+// values equal to or greater than:
+// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).
+// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long).
+// When the key is nil, the returned hash.Hash implements BinaryMarshaler
+// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash.
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) }
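A short usage sketch of the constructors above: one-shot hashing plus the keyed (MAC) mode. The key and message values are illustrative:

	package main

	import (
		"fmt"

		"golang.org/x/crypto/blake2b"
	)

	func main() {
		// One-shot BLAKE2b-256.
		sum := blake2b.Sum256([]byte("hello world"))
		fmt.Printf("%x\n", sum)

		// Keyed mode: a non-nil key (1 to 64 bytes) turns the hash into a MAC.
		h, err := blake2b.New256([]byte("a 16-byte secret"))
		if err != nil {
			panic(err)
		}
		h.Write([]byte("hello world"))
		fmt.Printf("%x\n", h.Sum(nil))
	}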
+
+func newDigest(hashSize int, key []byte) (*digest, error) {
+ if hashSize < 1 || hashSize > Size {
+ return nil, errHashSize
+ }
+ if len(key) > Size {
+ return nil, errKeySize
+ }
+ d := &digest{
+ size: hashSize,
+ keyLen: len(key),
+ }
+ copy(d.key[:], key)
+ d.Reset()
+ return d, nil
+}
+
+func checkSum(sum *[Size]byte, hashSize int, data []byte) {
+ h := iv
+ h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
+ var c [2]uint64
+
+ if length := len(data); length > BlockSize {
+ n := length &^ (BlockSize - 1)
+ if length == n {
+ n -= BlockSize
+ }
+ hashBlocks(&h, &c, 0, data[:n])
+ data = data[n:]
+ }
+
+ var block [BlockSize]byte
+ offset := copy(block[:], data)
+ remaining := uint64(BlockSize - offset)
+ if c[0] < remaining {
+ c[1]--
+ }
+ c[0] -= remaining
+
+ hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
+
+ for i, v := range h[:(hashSize+7)/8] {
+ binary.LittleEndian.PutUint64(sum[8*i:], v)
+ }
+}
+
+type digest struct {
+ h [8]uint64
+ c [2]uint64
+ size int
+ block [BlockSize]byte
+ offset int
+
+ key [BlockSize]byte
+ keyLen int
+}
+
+const (
+ magic = "b2b"
+ marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1
+)
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+ if d.keyLen != 0 {
+ return nil, errors.New("crypto/blake2b: cannot marshal MACs")
+ }
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ for i := 0; i < 8; i++ {
+ b = appendUint64(b, d.h[i])
+ }
+ b = appendUint64(b, d.c[0])
+ b = appendUint64(b, d.c[1])
+ // Maximum value for size is 64
+ b = append(b, byte(d.size))
+ b = append(b, d.block[:]...)
+ b = append(b, byte(d.offset))
+ return b, nil
+}
+
+func (d *digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("crypto/blake2b: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("crypto/blake2b: invalid hash state size")
+ }
+ b = b[len(magic):]
+ for i := 0; i < 8; i++ {
+ b, d.h[i] = consumeUint64(b)
+ }
+ b, d.c[0] = consumeUint64(b)
+ b, d.c[1] = consumeUint64(b)
+ d.size = int(b[0])
+ b = b[1:]
+ copy(d.block[:], b[:BlockSize])
+ b = b[BlockSize:]
+ d.offset = int(b[0])
+ return nil
+}
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Size() int { return d.size }
+
+func (d *digest) Reset() {
+ d.h = iv
+ d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
+ d.offset, d.c[0], d.c[1] = 0, 0, 0
+ if d.keyLen > 0 {
+ d.block = d.key
+ d.offset = BlockSize
+ }
+}
+
+func (d *digest) Write(p []byte) (n int, err error) {
+ n = len(p)
+
+ if d.offset > 0 {
+ remaining := BlockSize - d.offset
+ if n <= remaining {
+ d.offset += copy(d.block[d.offset:], p)
+ return
+ }
+ copy(d.block[d.offset:], p[:remaining])
+ hashBlocks(&d.h, &d.c, 0, d.block[:])
+ d.offset = 0
+ p = p[remaining:]
+ }
+
+ if length := len(p); length > BlockSize {
+ nn := length &^ (BlockSize - 1)
+ if length == nn {
+ nn -= BlockSize
+ }
+ hashBlocks(&d.h, &d.c, 0, p[:nn])
+ p = p[nn:]
+ }
+
+ if len(p) > 0 {
+ d.offset += copy(d.block[:], p)
+ }
+
+ return
+}
+
+func (d *digest) Sum(sum []byte) []byte {
+ var hash [Size]byte
+ d.finalize(&hash)
+ return append(sum, hash[:d.size]...)
+}
+
+func (d *digest) finalize(hash *[Size]byte) {
+ var block [BlockSize]byte
+ copy(block[:], d.block[:d.offset])
+ remaining := uint64(BlockSize - d.offset)
+
+ c := d.c
+ if c[0] < remaining {
+ c[1]--
+ }
+ c[0] -= remaining
+
+ h := d.h
+ hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
+
+ for i, v := range h {
+ binary.LittleEndian.PutUint64(hash[8*i:], v)
+ }
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.BigEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func appendUint32(b []byte, x uint32) []byte {
+ var a [4]byte
+ binary.BigEndian.PutUint32(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := binary.BigEndian.Uint64(b)
+ return b[8:], x
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+ x := binary.BigEndian.Uint32(b)
+ return b[4:], x
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
new file mode 100644
index 000000000..a1e08d7ef
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,amd64,!gccgo,!appengine
+
+package blake2b
+
+import _ "unsafe"
+
+//go:linkname x86_HasAVX internal/cpu.X86.HasAVX
+var x86_HasAVX bool
+
+//go:linkname x86_HasAVX2 internal/cpu.X86.HasAVX2
+var x86_HasAVX2 bool
+
+//go:linkname x86_HasSSE4 internal/cpu.X86.HasSSE4
+var x86_HasSSE4 bool
+
+func init() {
+ useAVX2 = x86_HasAVX2
+ useAVX = x86_HasAVX
+ useSSE4 = x86_HasSSE4
+}
+
+//go:noescape
+func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+
+//go:noescape
+func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+
+//go:noescape
+func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+
+func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+ if useAVX2 {
+ hashBlocksAVX2(h, c, flag, blocks)
+ } else if useAVX {
+ hashBlocksAVX(h, c, flag, blocks)
+ } else if useSSE4 {
+ hashBlocksSSE4(h, c, flag, blocks)
+ } else {
+ hashBlocksGeneric(h, c, flag, blocks)
+ }
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
new file mode 100644
index 000000000..5593b1b3d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
@@ -0,0 +1,750 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
+DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
+DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
+DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
+DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
+DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
+DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
+DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
+DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
+
+#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
+#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
+#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
+#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
+#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
+
+#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
+ VPADDQ m0, Y0, Y0; \
+ VPADDQ Y1, Y0, Y0; \
+ VPXOR Y0, Y3, Y3; \
+ VPSHUFD $-79, Y3, Y3; \
+ VPADDQ Y3, Y2, Y2; \
+ VPXOR Y2, Y1, Y1; \
+ VPSHUFB c40, Y1, Y1; \
+ VPADDQ m1, Y0, Y0; \
+ VPADDQ Y1, Y0, Y0; \
+ VPXOR Y0, Y3, Y3; \
+ VPSHUFB c48, Y3, Y3; \
+ VPADDQ Y3, Y2, Y2; \
+ VPXOR Y2, Y1, Y1; \
+ VPADDQ Y1, Y1, t; \
+ VPSRLQ $63, Y1, Y1; \
+ VPXOR t, Y1, Y1; \
+ VPERMQ_0x39_Y1_Y1; \
+ VPERMQ_0x4E_Y2_Y2; \
+ VPERMQ_0x93_Y3_Y3; \
+ VPADDQ m2, Y0, Y0; \
+ VPADDQ Y1, Y0, Y0; \
+ VPXOR Y0, Y3, Y3; \
+ VPSHUFD $-79, Y3, Y3; \
+ VPADDQ Y3, Y2, Y2; \
+ VPXOR Y2, Y1, Y1; \
+ VPSHUFB c40, Y1, Y1; \
+ VPADDQ m3, Y0, Y0; \
+ VPADDQ Y1, Y0, Y0; \
+ VPXOR Y0, Y3, Y3; \
+ VPSHUFB c48, Y3, Y3; \
+ VPADDQ Y3, Y2, Y2; \
+ VPXOR Y2, Y1, Y1; \
+ VPADDQ Y1, Y1, t; \
+ VPSRLQ $63, Y1, Y1; \
+ VPXOR t, Y1, Y1; \
+ VPERMQ_0x39_Y3_Y3; \
+ VPERMQ_0x4E_Y2_Y2; \
+ VPERMQ_0x93_Y1_Y1
+
+#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
+#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
+#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
+#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
+#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
+
+#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
+#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
+#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
+#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
+#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
+
+#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
+#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
+#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
+#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
+#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
+
+#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
+
+#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
+#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
+
+// load msg: Y12 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
+ VMOVQ_SI_X12(i0*8); \
+ VMOVQ_SI_X11(i2*8); \
+ VPINSRQ_1_SI_X12(i1*8); \
+ VPINSRQ_1_SI_X11(i3*8); \
+ VINSERTI128 $1, X11, Y12, Y12
+
+// load msg: Y13 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
+ VMOVQ_SI_X13(i0*8); \
+ VMOVQ_SI_X11(i2*8); \
+ VPINSRQ_1_SI_X13(i1*8); \
+ VPINSRQ_1_SI_X11(i3*8); \
+ VINSERTI128 $1, X11, Y13, Y13
+
+// load msg: Y14 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
+ VMOVQ_SI_X14(i0*8); \
+ VMOVQ_SI_X11(i2*8); \
+ VPINSRQ_1_SI_X14(i1*8); \
+ VPINSRQ_1_SI_X11(i3*8); \
+ VINSERTI128 $1, X11, Y14, Y14
+
+// load msg: Y15 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
+ VMOVQ_SI_X15(i0*8); \
+ VMOVQ_SI_X11(i2*8); \
+ VPINSRQ_1_SI_X15(i1*8); \
+ VPINSRQ_1_SI_X11(i3*8); \
+ VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
+ VMOVQ_SI_X12_0; \
+ VMOVQ_SI_X11(4*8); \
+ VPINSRQ_1_SI_X12(2*8); \
+ VPINSRQ_1_SI_X11(6*8); \
+ VINSERTI128 $1, X11, Y12, Y12; \
+ LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
+ LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
+ LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
+
+#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
+ LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
+ LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
+ VMOVQ_SI_X11(11*8); \
+ VPSHUFD $0x4E, 0*8(SI), X14; \
+ VPINSRQ_1_SI_X11(5*8); \
+ VINSERTI128 $1, X11, Y14, Y14; \
+ LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
+
+#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
+ VMOVQ_SI_X11(5*8); \
+ VMOVDQU 11*8(SI), X12; \
+ VPINSRQ_1_SI_X11(15*8); \
+ VINSERTI128 $1, X11, Y12, Y12; \
+ VMOVQ_SI_X13(8*8); \
+ VMOVQ_SI_X11(2*8); \
+ VPINSRQ_1_SI_X13_0; \
+ VPINSRQ_1_SI_X11(13*8); \
+ VINSERTI128 $1, X11, Y13, Y13; \
+ LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
+ LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
+
+#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
+ LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
+ LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
+ LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
+ VMOVQ_SI_X15(6*8); \
+ VMOVQ_SI_X11_0; \
+ VPINSRQ_1_SI_X15(10*8); \
+ VPINSRQ_1_SI_X11(8*8); \
+ VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
+ LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
+ VMOVQ_SI_X13_0; \
+ VMOVQ_SI_X11(4*8); \
+ VPINSRQ_1_SI_X13(7*8); \
+ VPINSRQ_1_SI_X11(15*8); \
+ VINSERTI128 $1, X11, Y13, Y13; \
+ LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
+ LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
+
+#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
+ VMOVQ_SI_X12(2*8); \
+ VMOVQ_SI_X11_0; \
+ VPINSRQ_1_SI_X12(6*8); \
+ VPINSRQ_1_SI_X11(8*8); \
+ VINSERTI128 $1, X11, Y12, Y12; \
+ LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
+ LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
+ LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
+
+#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
+ LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
+ LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
+ VMOVQ_SI_X14_0; \
+ VPSHUFD $0x4E, 8*8(SI), X11; \
+ VPINSRQ_1_SI_X14(6*8); \
+ VINSERTI128 $1, X11, Y14, Y14; \
+ LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
+
+#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
+ LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
+ LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
+ LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
+ VMOVQ_SI_X15_0; \
+ VMOVQ_SI_X11(6*8); \
+ VPINSRQ_1_SI_X15(4*8); \
+ VPINSRQ_1_SI_X11(10*8); \
+ VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
+ VMOVQ_SI_X12(6*8); \
+ VMOVQ_SI_X11(11*8); \
+ VPINSRQ_1_SI_X12(14*8); \
+ VPINSRQ_1_SI_X11_0; \
+ VINSERTI128 $1, X11, Y12, Y12; \
+ LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
+ VMOVQ_SI_X11(1*8); \
+ VMOVDQU 12*8(SI), X14; \
+ VPINSRQ_1_SI_X11(10*8); \
+ VINSERTI128 $1, X11, Y14, Y14; \
+ VMOVQ_SI_X15(2*8); \
+ VMOVDQU 4*8(SI), X11; \
+ VPINSRQ_1_SI_X15(7*8); \
+ VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
+ LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
+ VMOVQ_SI_X13(2*8); \
+ VPSHUFD $0x4E, 5*8(SI), X11; \
+ VPINSRQ_1_SI_X13(4*8); \
+ VINSERTI128 $1, X11, Y13, Y13; \
+ LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
+ VMOVQ_SI_X15(11*8); \
+ VMOVQ_SI_X11(12*8); \
+ VPINSRQ_1_SI_X15(14*8); \
+ VPINSRQ_1_SI_X11_0; \
+ VINSERTI128 $1, X11, Y15, Y15
+
+// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
+ MOVQ h+0(FP), AX
+ MOVQ c+8(FP), BX
+ MOVQ flag+16(FP), CX
+ MOVQ blocks_base+24(FP), SI
+ MOVQ blocks_len+32(FP), DI
+
+ MOVQ SP, DX
+ MOVQ SP, R9
+ ADDQ $31, R9
+ ANDQ $~31, R9
+ MOVQ R9, SP
+
+ MOVQ CX, 16(SP)
+ XORQ CX, CX
+ MOVQ CX, 24(SP)
+
+ VMOVDQU ·AVX2_c40<>(SB), Y4
+ VMOVDQU ·AVX2_c48<>(SB), Y5
+
+ VMOVDQU 0(AX), Y8
+ VMOVDQU 32(AX), Y9
+ VMOVDQU ·AVX2_iv0<>(SB), Y6
+ VMOVDQU ·AVX2_iv1<>(SB), Y7
+
+ MOVQ 0(BX), R8
+ MOVQ 8(BX), R9
+ MOVQ R9, 8(SP)
+
+loop:
+ ADDQ $128, R8
+ MOVQ R8, 0(SP)
+ CMPQ R8, $128
+ JGE noinc
+ INCQ R9
+ MOVQ R9, 8(SP)
+
+noinc:
+ VMOVDQA Y8, Y0
+ VMOVDQA Y9, Y1
+ VMOVDQA Y6, Y2
+ VPXOR 0(SP), Y7, Y3
+
+ LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
+ VMOVDQA Y12, 32(SP)
+ VMOVDQA Y13, 64(SP)
+ VMOVDQA Y14, 96(SP)
+ VMOVDQA Y15, 128(SP)
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
+ VMOVDQA Y12, 160(SP)
+ VMOVDQA Y13, 192(SP)
+ VMOVDQA Y14, 224(SP)
+ VMOVDQA Y15, 256(SP)
+
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+ LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
+ ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+
+ ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
+ ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)
+
+ VPXOR Y0, Y8, Y8
+ VPXOR Y1, Y9, Y9
+ VPXOR Y2, Y8, Y8
+ VPXOR Y3, Y9, Y9
+
+ LEAQ 128(SI), SI
+ SUBQ $128, DI
+ JNE loop
+
+ MOVQ R8, 0(BX)
+ MOVQ R9, 8(BX)
+
+ VMOVDQU Y8, 0(AX)
+ VMOVDQU Y9, 32(AX)
+ VZEROUPPER
+
+ MOVQ DX, SP
+ RET
+
+#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
+#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
+#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
+#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
+#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
+
+#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
+#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
+#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
+#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
+#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
+#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
+#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
+#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
+
+#define SHUFFLE_AVX() \
+ VMOVDQA X6, X13; \
+ VMOVDQA X2, X14; \
+ VMOVDQA X4, X6; \
+ VPUNPCKLQDQ_X13_X13_X15; \
+ VMOVDQA X5, X4; \
+ VMOVDQA X6, X5; \
+ VPUNPCKHQDQ_X15_X7_X6; \
+ VPUNPCKLQDQ_X7_X7_X15; \
+ VPUNPCKHQDQ_X15_X13_X7; \
+ VPUNPCKLQDQ_X3_X3_X15; \
+ VPUNPCKHQDQ_X15_X2_X2; \
+ VPUNPCKLQDQ_X14_X14_X15; \
+ VPUNPCKHQDQ_X15_X3_X3; \
+
+#define SHUFFLE_AVX_INV() \
+ VMOVDQA X2, X13; \
+ VMOVDQA X4, X14; \
+ VPUNPCKLQDQ_X2_X2_X15; \
+ VMOVDQA X5, X4; \
+ VPUNPCKHQDQ_X15_X3_X2; \
+ VMOVDQA X14, X5; \
+ VPUNPCKLQDQ_X3_X3_X15; \
+ VMOVDQA X6, X14; \
+ VPUNPCKHQDQ_X15_X13_X3; \
+ VPUNPCKLQDQ_X7_X7_X15; \
+ VPUNPCKHQDQ_X15_X6_X6; \
+ VPUNPCKLQDQ_X14_X14_X15; \
+ VPUNPCKHQDQ_X15_X7_X7; \
+
+#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
+ VPADDQ m0, v0, v0; \
+ VPADDQ v2, v0, v0; \
+ VPADDQ m1, v1, v1; \
+ VPADDQ v3, v1, v1; \
+ VPXOR v0, v6, v6; \
+ VPXOR v1, v7, v7; \
+ VPSHUFD $-79, v6, v6; \
+ VPSHUFD $-79, v7, v7; \
+ VPADDQ v6, v4, v4; \
+ VPADDQ v7, v5, v5; \
+ VPXOR v4, v2, v2; \
+ VPXOR v5, v3, v3; \
+ VPSHUFB c40, v2, v2; \
+ VPSHUFB c40, v3, v3; \
+ VPADDQ m2, v0, v0; \
+ VPADDQ v2, v0, v0; \
+ VPADDQ m3, v1, v1; \
+ VPADDQ v3, v1, v1; \
+ VPXOR v0, v6, v6; \
+ VPXOR v1, v7, v7; \
+ VPSHUFB c48, v6, v6; \
+ VPSHUFB c48, v7, v7; \
+ VPADDQ v6, v4, v4; \
+ VPADDQ v7, v5, v5; \
+ VPXOR v4, v2, v2; \
+ VPXOR v5, v3, v3; \
+ VPADDQ v2, v2, t0; \
+ VPSRLQ $63, v2, v2; \
+ VPXOR t0, v2, v2; \
+ VPADDQ v3, v3, t0; \
+ VPSRLQ $63, v3, v3; \
+ VPXOR t0, v3, v3
+
+// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
+// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
+#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
+ VMOVQ_SI_X12(i0*8); \
+ VMOVQ_SI_X13(i2*8); \
+ VMOVQ_SI_X14(i4*8); \
+ VMOVQ_SI_X15(i6*8); \
+ VPINSRQ_1_SI_X12(i1*8); \
+ VPINSRQ_1_SI_X13(i3*8); \
+ VPINSRQ_1_SI_X14(i5*8); \
+ VPINSRQ_1_SI_X15(i7*8)
+
+// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
+#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
+ VMOVQ_SI_X12_0; \
+ VMOVQ_SI_X13(4*8); \
+ VMOVQ_SI_X14(1*8); \
+ VMOVQ_SI_X15(5*8); \
+ VPINSRQ_1_SI_X12(2*8); \
+ VPINSRQ_1_SI_X13(6*8); \
+ VPINSRQ_1_SI_X14(3*8); \
+ VPINSRQ_1_SI_X15(7*8)
+
+// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
+#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
+ VPSHUFD $0x4E, 0*8(SI), X12; \
+ VMOVQ_SI_X13(11*8); \
+ VMOVQ_SI_X14(12*8); \
+ VMOVQ_SI_X15(7*8); \
+ VPINSRQ_1_SI_X13(5*8); \
+ VPINSRQ_1_SI_X14(2*8); \
+ VPINSRQ_1_SI_X15(3*8)
+
+// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
+#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
+ VMOVDQU 11*8(SI), X12; \
+ VMOVQ_SI_X13(5*8); \
+ VMOVQ_SI_X14(8*8); \
+ VMOVQ_SI_X15(2*8); \
+ VPINSRQ_1_SI_X13(15*8); \
+ VPINSRQ_1_SI_X14_0; \
+ VPINSRQ_1_SI_X15(13*8)
+
+// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
+#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
+ VMOVQ_SI_X12(2*8); \
+ VMOVQ_SI_X13(4*8); \
+ VMOVQ_SI_X14(6*8); \
+ VMOVQ_SI_X15_0; \
+ VPINSRQ_1_SI_X12(5*8); \
+ VPINSRQ_1_SI_X13(15*8); \
+ VPINSRQ_1_SI_X14(10*8); \
+ VPINSRQ_1_SI_X15(8*8)
+
+// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
+#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
+ VMOVQ_SI_X12(9*8); \
+ VMOVQ_SI_X13(2*8); \
+ VMOVQ_SI_X14_0; \
+ VMOVQ_SI_X15(4*8); \
+ VPINSRQ_1_SI_X12(5*8); \
+ VPINSRQ_1_SI_X13(10*8); \
+ VPINSRQ_1_SI_X14(7*8); \
+ VPINSRQ_1_SI_X15(15*8)
+
+// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
+#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
+ VMOVQ_SI_X12(2*8); \
+ VMOVQ_SI_X13_0; \
+ VMOVQ_SI_X14(12*8); \
+ VMOVQ_SI_X15(11*8); \
+ VPINSRQ_1_SI_X12(6*8); \
+ VPINSRQ_1_SI_X13(8*8); \
+ VPINSRQ_1_SI_X14(10*8); \
+ VPINSRQ_1_SI_X15(3*8)
+
+// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
+#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
+ MOVQ 0*8(SI), X12; \
+ VPSHUFD $0x4E, 8*8(SI), X13; \
+ MOVQ 7*8(SI), X14; \
+ MOVQ 2*8(SI), X15; \
+ VPINSRQ_1_SI_X12(6*8); \
+ VPINSRQ_1_SI_X14(3*8); \
+ VPINSRQ_1_SI_X15(11*8)
+
+// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
+#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
+ MOVQ 6*8(SI), X12; \
+ MOVQ 11*8(SI), X13; \
+ MOVQ 15*8(SI), X14; \
+ MOVQ 3*8(SI), X15; \
+ VPINSRQ_1_SI_X12(14*8); \
+ VPINSRQ_1_SI_X13_0; \
+ VPINSRQ_1_SI_X14(9*8); \
+ VPINSRQ_1_SI_X15(8*8)
+
+// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
+#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
+ MOVQ 5*8(SI), X12; \
+ MOVQ 8*8(SI), X13; \
+ MOVQ 0*8(SI), X14; \
+ MOVQ 6*8(SI), X15; \
+ VPINSRQ_1_SI_X12(15*8); \
+ VPINSRQ_1_SI_X13(2*8); \
+ VPINSRQ_1_SI_X14(4*8); \
+ VPINSRQ_1_SI_X15(10*8)
+
+// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
+#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
+ VMOVDQU 12*8(SI), X12; \
+ MOVQ 1*8(SI), X13; \
+ MOVQ 2*8(SI), X14; \
+ VPINSRQ_1_SI_X13(10*8); \
+ VPINSRQ_1_SI_X14(7*8); \
+ VMOVDQU 4*8(SI), X15
+
+// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
+#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
+ MOVQ 15*8(SI), X12; \
+ MOVQ 3*8(SI), X13; \
+ MOVQ 11*8(SI), X14; \
+ MOVQ 12*8(SI), X15; \
+ VPINSRQ_1_SI_X12(9*8); \
+ VPINSRQ_1_SI_X13(13*8); \
+ VPINSRQ_1_SI_X14(14*8); \
+ VPINSRQ_1_SI_X15_0
+
+// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
+ MOVQ h+0(FP), AX
+ MOVQ c+8(FP), BX
+ MOVQ flag+16(FP), CX
+ MOVQ blocks_base+24(FP), SI
+ MOVQ blocks_len+32(FP), DI
+
+ MOVQ SP, BP
+ MOVQ SP, R9
+ ADDQ $15, R9
+ ANDQ $~15, R9
+ MOVQ R9, SP
+
+ VMOVDQU ·AVX_c40<>(SB), X0
+ VMOVDQU ·AVX_c48<>(SB), X1
+ VMOVDQA X0, X8
+ VMOVDQA X1, X9
+
+ VMOVDQU ·AVX_iv3<>(SB), X0
+ VMOVDQA X0, 0(SP)
+ XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
+
+ VMOVDQU 0(AX), X10
+ VMOVDQU 16(AX), X11
+ VMOVDQU 32(AX), X2
+ VMOVDQU 48(AX), X3
+
+ MOVQ 0(BX), R8
+ MOVQ 8(BX), R9
+
+loop:
+ ADDQ $128, R8
+ CMPQ R8, $128
+ JGE noinc
+ INCQ R9
+
+noinc:
+ VMOVQ_R8_X15
+ VPINSRQ_1_R9_X15
+
+ VMOVDQA X10, X0
+ VMOVDQA X11, X1
+ VMOVDQU ·AVX_iv0<>(SB), X4
+ VMOVDQU ·AVX_iv1<>(SB), X5
+ VMOVDQU ·AVX_iv2<>(SB), X6
+
+ VPXOR X15, X6, X6
+ VMOVDQA 0(SP), X7
+
+ LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
+ VMOVDQA X12, 16(SP)
+ VMOVDQA X13, 32(SP)
+ VMOVDQA X14, 48(SP)
+ VMOVDQA X15, 64(SP)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
+ VMOVDQA X12, 80(SP)
+ VMOVDQA X13, 96(SP)
+ VMOVDQA X14, 112(SP)
+ VMOVDQA X15, 128(SP)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
+ VMOVDQA X12, 144(SP)
+ VMOVDQA X13, 160(SP)
+ VMOVDQA X14, 176(SP)
+ VMOVDQA X15, 192(SP)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
+ VMOVDQA X12, 208(SP)
+ VMOVDQA X13, 224(SP)
+ VMOVDQA X14, 240(SP)
+ VMOVDQA X15, 256(SP)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX()
+ LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
+ SHUFFLE_AVX()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
+ SHUFFLE_AVX()
+ HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
+ SHUFFLE_AVX_INV()
+
+ VMOVDQU 32(AX), X14
+ VMOVDQU 48(AX), X15
+ VPXOR X0, X10, X10
+ VPXOR X1, X11, X11
+ VPXOR X2, X14, X14
+ VPXOR X3, X15, X15
+ VPXOR X4, X10, X10
+ VPXOR X5, X11, X11
+ VPXOR X6, X14, X2
+ VPXOR X7, X15, X3
+ VMOVDQU X2, 32(AX)
+ VMOVDQU X3, 48(AX)
+
+ LEAQ 128(SI), SI
+ SUBQ $128, DI
+ JNE loop
+
+ VMOVDQU X10, 0(AX)
+ VMOVDQU X11, 16(AX)
+
+ MOVQ R8, 0(BX)
+ MOVQ R9, 8(BX)
+ VZEROUPPER
+
+ MOVQ BP, SP
+ RET
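The ADDQ $128, R8 / CMPQ R8, $128 / JGE noinc / INCQ R9 sequence at the top of both assembly loops maintains BLAKE2b's 128-bit message counter: the low word grows by the 128-byte block size, and the high word is incremented only when the low word wraps. A minimal Go sketch of the same carry logic, mirroring the c0/c1 update in hashBlocksGeneric further below (the helper name is illustrative):

// advanceCounter adds one 128-byte block to the 128-bit message counter,
// split into low (c0) and high (c1) 64-bit words. Unsigned wraparound of
// c0 signals the carry, which is what CMPQ R8, $128 / JGE noinc detects.
func advanceCounter(c0, c1 uint64) (uint64, uint64) {
	c0 += 128 // BlockSize
	if c0 < 128 { // c0 wrapped around zero: carry into the high word
		c1++
	}
	return c0, c1
}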
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
new file mode 100644
index 000000000..2ab7c30fc
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7,amd64,!gccgo,!appengine
+
+package blake2b
+
+func init() {
+ useSSE4 = supportsSSE4()
+}
+
+//go:noescape
+func supportsSSE4() bool
+
+//go:noescape
+func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+
+func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+ if useSSE4 {
+ hashBlocksSSE4(h, c, flag, blocks)
+ } else {
+ hashBlocksGeneric(h, c, flag, blocks)
+ }
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
new file mode 100644
index 000000000..64530740b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
@@ -0,0 +1,290 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
+DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
+
+DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
+DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
+
+DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
+DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
+
+DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
+GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
+
+DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
+DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
+
+DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
+DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
+
+#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
+ MOVO v4, t1; \
+ MOVO v5, v4; \
+ MOVO t1, v5; \
+ MOVO v6, t1; \
+ PUNPCKLQDQ v6, t2; \
+ PUNPCKHQDQ v7, v6; \
+ PUNPCKHQDQ t2, v6; \
+ PUNPCKLQDQ v7, t2; \
+ MOVO t1, v7; \
+ MOVO v2, t1; \
+ PUNPCKHQDQ t2, v7; \
+ PUNPCKLQDQ v3, t2; \
+ PUNPCKHQDQ t2, v2; \
+ PUNPCKLQDQ t1, t2; \
+ PUNPCKHQDQ t2, v3
+
+#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
+ MOVO v4, t1; \
+ MOVO v5, v4; \
+ MOVO t1, v5; \
+ MOVO v2, t1; \
+ PUNPCKLQDQ v2, t2; \
+ PUNPCKHQDQ v3, v2; \
+ PUNPCKHQDQ t2, v2; \
+ PUNPCKLQDQ v3, t2; \
+ MOVO t1, v3; \
+ MOVO v6, t1; \
+ PUNPCKHQDQ t2, v3; \
+ PUNPCKLQDQ v7, t2; \
+ PUNPCKHQDQ t2, v6; \
+ PUNPCKLQDQ t1, t2; \
+ PUNPCKHQDQ t2, v7
+
+#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
+ PADDQ m0, v0; \
+ PADDQ m1, v1; \
+ PADDQ v2, v0; \
+ PADDQ v3, v1; \
+ PXOR v0, v6; \
+ PXOR v1, v7; \
+ PSHUFD $0xB1, v6, v6; \
+ PSHUFD $0xB1, v7, v7; \
+ PADDQ v6, v4; \
+ PADDQ v7, v5; \
+ PXOR v4, v2; \
+ PXOR v5, v3; \
+ PSHUFB c40, v2; \
+ PSHUFB c40, v3; \
+ PADDQ m2, v0; \
+ PADDQ m3, v1; \
+ PADDQ v2, v0; \
+ PADDQ v3, v1; \
+ PXOR v0, v6; \
+ PXOR v1, v7; \
+ PSHUFB c48, v6; \
+ PSHUFB c48, v7; \
+ PADDQ v6, v4; \
+ PADDQ v7, v5; \
+ PXOR v4, v2; \
+ PXOR v5, v3; \
+ MOVOU v2, t0; \
+ PADDQ v2, t0; \
+ PSRLQ $63, v2; \
+ PXOR t0, v2; \
+ MOVOU v3, t0; \
+ PADDQ v3, t0; \
+ PSRLQ $63, v3; \
+ PXOR t0, v3
+
+#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
+ MOVQ i0*8(src), m0; \
+ PINSRQ $1, i1*8(src), m0; \
+ MOVQ i2*8(src), m1; \
+ PINSRQ $1, i3*8(src), m1; \
+ MOVQ i4*8(src), m2; \
+ PINSRQ $1, i5*8(src), m2; \
+ MOVQ i6*8(src), m3; \
+ PINSRQ $1, i7*8(src), m3
+
+// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
+ MOVQ h+0(FP), AX
+ MOVQ c+8(FP), BX
+ MOVQ flag+16(FP), CX
+ MOVQ blocks_base+24(FP), SI
+ MOVQ blocks_len+32(FP), DI
+
+ MOVQ SP, BP
+ MOVQ SP, R9
+ ADDQ $15, R9
+ ANDQ $~15, R9
+ MOVQ R9, SP
+
+ MOVOU ·iv3<>(SB), X0
+ MOVO X0, 0(SP)
+ XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)
+
+ MOVOU ·c40<>(SB), X13
+ MOVOU ·c48<>(SB), X14
+
+ MOVOU 0(AX), X12
+ MOVOU 16(AX), X15
+
+ MOVQ 0(BX), R8
+ MOVQ 8(BX), R9
+
+loop:
+ ADDQ $128, R8
+ CMPQ R8, $128
+ JGE noinc
+ INCQ R9
+
+noinc:
+ MOVQ R8, X8
+ PINSRQ $1, R9, X8
+
+ MOVO X12, X0
+ MOVO X15, X1
+ MOVOU 32(AX), X2
+ MOVOU 48(AX), X3
+ MOVOU ·iv0<>(SB), X4
+ MOVOU ·iv1<>(SB), X5
+ MOVOU ·iv2<>(SB), X6
+
+ PXOR X8, X6
+ MOVO 0(SP), X7
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
+ MOVO X8, 16(SP)
+ MOVO X9, 32(SP)
+ MOVO X10, 48(SP)
+ MOVO X11, 64(SP)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
+ MOVO X8, 80(SP)
+ MOVO X9, 96(SP)
+ MOVO X10, 112(SP)
+ MOVO X11, 128(SP)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
+ MOVO X8, 144(SP)
+ MOVO X9, 160(SP)
+ MOVO X10, 176(SP)
+ MOVO X11, 192(SP)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
+ MOVO X8, 208(SP)
+ MOVO X9, 224(SP)
+ MOVO X10, 240(SP)
+ MOVO X11, 256(SP)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
+ SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+
+ MOVOU 32(AX), X10
+ MOVOU 48(AX), X11
+ PXOR X0, X12
+ PXOR X1, X15
+ PXOR X2, X10
+ PXOR X3, X11
+ PXOR X4, X12
+ PXOR X5, X15
+ PXOR X6, X10
+ PXOR X7, X11
+ MOVOU X10, 32(AX)
+ MOVOU X11, 48(AX)
+
+ LEAQ 128(SI), SI
+ SUBQ $128, DI
+ JNE loop
+
+ MOVOU X12, 0(AX)
+ MOVOU X15, 16(AX)
+
+ MOVQ R8, 0(BX)
+ MOVQ R9, 8(BX)
+
+ MOVQ BP, SP
+ RET
+
+// func supportsSSE4() bool
+TEXT ·supportsSSE4(SB), 4, $0-1
+ MOVL $1, AX
+ CPUID
+	SHRL $19, CX // Bit 19 of ECX indicates SSE4.1 support
+	ANDL $1, CX  // CX != 0 if SSE4.1 is supported
+ MOVB CX, ret+0(FP)
+ RET
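supportsSSE4 above executes CPUID with EAX=1 and tests bit 19 of ECX, which encodes SSE4.1 on amd64. For comparison only, the same capability can be read from pure Go via golang.org/x/sys/cpu; that package postdates this vendored snapshot, so this is an equivalent sketch rather than what the library does:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Reports the same CPUID leaf-1 ECX bit 19 that supportsSSE4 probes.
	fmt.Println("SSE4.1 supported:", cpu.X86.HasSSE41)
}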
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
new file mode 100644
index 000000000..4bd2abc91
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blake2b
+
+import "encoding/binary"
+
+// precomputed holds the BLAKE2b message schedule: twelve 16-byte arrays,
+// one per round, whose entries are taken from the sigma constants (rounds
+// 10 and 11 repeat rounds 0 and 1).
+var precomputed = [12][16]byte{
+ {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
+ {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
+ {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
+ {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
+ {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
+ {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
+ {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
+ {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
+ {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
+ {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
+ {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first
+ {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second
+}
+
+func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+ var m [16]uint64
+ c0, c1 := c[0], c[1]
+
+ for i := 0; i < len(blocks); {
+ c0 += BlockSize
+ if c0 < BlockSize {
+ c1++
+ }
+
+ v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
+ v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
+ v12 ^= c0
+ v13 ^= c1
+ v14 ^= flag
+
+ for j := range m {
+ m[j] = binary.LittleEndian.Uint64(blocks[i:])
+ i += 8
+ }
+
+ for j := range precomputed {
+ s := &(precomputed[j])
+
+ v0 += m[s[0]]
+ v0 += v4
+ v12 ^= v0
+ v12 = v12<<(64-32) | v12>>32
+ v8 += v12
+ v4 ^= v8
+ v4 = v4<<(64-24) | v4>>24
+ v1 += m[s[1]]
+ v1 += v5
+ v13 ^= v1
+ v13 = v13<<(64-32) | v13>>32
+ v9 += v13
+ v5 ^= v9
+ v5 = v5<<(64-24) | v5>>24
+ v2 += m[s[2]]
+ v2 += v6
+ v14 ^= v2
+ v14 = v14<<(64-32) | v14>>32
+ v10 += v14
+ v6 ^= v10
+ v6 = v6<<(64-24) | v6>>24
+ v3 += m[s[3]]
+ v3 += v7
+ v15 ^= v3
+ v15 = v15<<(64-32) | v15>>32
+ v11 += v15
+ v7 ^= v11
+ v7 = v7<<(64-24) | v7>>24
+
+ v0 += m[s[4]]
+ v0 += v4
+ v12 ^= v0
+ v12 = v12<<(64-16) | v12>>16
+ v8 += v12
+ v4 ^= v8
+ v4 = v4<<(64-63) | v4>>63
+ v1 += m[s[5]]
+ v1 += v5
+ v13 ^= v1
+ v13 = v13<<(64-16) | v13>>16
+ v9 += v13
+ v5 ^= v9
+ v5 = v5<<(64-63) | v5>>63
+ v2 += m[s[6]]
+ v2 += v6
+ v14 ^= v2
+ v14 = v14<<(64-16) | v14>>16
+ v10 += v14
+ v6 ^= v10
+ v6 = v6<<(64-63) | v6>>63
+ v3 += m[s[7]]
+ v3 += v7
+ v15 ^= v3
+ v15 = v15<<(64-16) | v15>>16
+ v11 += v15
+ v7 ^= v11
+ v7 = v7<<(64-63) | v7>>63
+
+ v0 += m[s[8]]
+ v0 += v5
+ v15 ^= v0
+ v15 = v15<<(64-32) | v15>>32
+ v10 += v15
+ v5 ^= v10
+ v5 = v5<<(64-24) | v5>>24
+ v1 += m[s[9]]
+ v1 += v6
+ v12 ^= v1
+ v12 = v12<<(64-32) | v12>>32
+ v11 += v12
+ v6 ^= v11
+ v6 = v6<<(64-24) | v6>>24
+ v2 += m[s[10]]
+ v2 += v7
+ v13 ^= v2
+ v13 = v13<<(64-32) | v13>>32
+ v8 += v13
+ v7 ^= v8
+ v7 = v7<<(64-24) | v7>>24
+ v3 += m[s[11]]
+ v3 += v4
+ v14 ^= v3
+ v14 = v14<<(64-32) | v14>>32
+ v9 += v14
+ v4 ^= v9
+ v4 = v4<<(64-24) | v4>>24
+
+ v0 += m[s[12]]
+ v0 += v5
+ v15 ^= v0
+ v15 = v15<<(64-16) | v15>>16
+ v10 += v15
+ v5 ^= v10
+ v5 = v5<<(64-63) | v5>>63
+ v1 += m[s[13]]
+ v1 += v6
+ v12 ^= v1
+ v12 = v12<<(64-16) | v12>>16
+ v11 += v12
+ v6 ^= v11
+ v6 = v6<<(64-63) | v6>>63
+ v2 += m[s[14]]
+ v2 += v7
+ v13 ^= v2
+ v13 = v13<<(64-16) | v13>>16
+ v8 += v13
+ v7 ^= v8
+ v7 = v7<<(64-63) | v7>>63
+ v3 += m[s[15]]
+ v3 += v4
+ v14 ^= v3
+ v14 = v14<<(64-16) | v14>>16
+ v9 += v14
+ v4 ^= v9
+ v4 = v4<<(64-63) | v4>>63
+
+ }
+
+ h[0] ^= v0 ^ v8
+ h[1] ^= v1 ^ v9
+ h[2] ^= v2 ^ v10
+ h[3] ^= v3 ^ v11
+ h[4] ^= v4 ^ v12
+ h[5] ^= v5 ^ v13
+ h[6] ^= v6 ^ v14
+ h[7] ^= v7 ^ v15
+ }
+ c[0], c[1] = c0, c1
+}
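The unrolled loop body above is the BLAKE2b G function applied to the four columns and then the four diagonals of the 4x4 state, with the two halves of each G interleaved; the shift pairs such as v12<<(64-32) | v12>>32 are right-rotations by 32, 24, 16, and 63. Factoring one full G application into a helper shows the structure; a sketch equivalent to the inlined code (the helper name is illustrative, and math/bits needs go1.9, so this is not how the file itself is written):

import "math/bits"

// g is one BLAKE2b quarter-round over four state words and two message
// words; the first column step above corresponds to
// g(v0, v4, v8, v12, m[s[0]], m[s[4]]).
func g(a, b, c, d, x, y uint64) (uint64, uint64, uint64, uint64) {
	a += b + x
	d = bits.RotateLeft64(d^a, -32) // a negative count rotates right
	c += d
	b = bits.RotateLeft64(b^c, -24)
	a += b + y
	d = bits.RotateLeft64(d^a, -16)
	c += d
	b = bits.RotateLeft64(b^c, -63)
	return a, b, c, d
}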
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
new file mode 100644
index 000000000..da156a1ba
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine gccgo
+
+package blake2b
+
+func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+ hashBlocksGeneric(h, c, flag, blocks)
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go
new file mode 100644
index 000000000..c814496a7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go
@@ -0,0 +1,177 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blake2b
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// XOF defines the interface to hash functions that
+// support arbitrary-length output.
+type XOF interface {
+ // Write absorbs more data into the hash's state. It panics if called
+ // after Read.
+ io.Writer
+
+ // Read reads more output from the hash. It returns io.EOF if the limit
+ // has been reached.
+ io.Reader
+
+ // Clone returns a copy of the XOF in its current state.
+ Clone() XOF
+
+ // Reset resets the XOF to its initial state.
+ Reset()
+}
+
+// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
+// that the length of the output is not known in advance.
+const OutputLengthUnknown = 0
+
+// magicUnknownOutputLength is a magic value for the output size that indicates
+// an unknown number of output bytes.
+const magicUnknownOutputLength = (1 << 32) - 1
+
+// maxOutputLength is the absolute maximum number of bytes to produce when the
+// number of output bytes is unknown.
+const maxOutputLength = (1 << 32) * 64
+
+// NewXOF creates a new variable-output-length hash. The hash either produces a
+// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
+// (size == OutputLengthUnknown). In the latter case, an absolute limit of
+// 256GiB applies.
+//
+// A non-nil key turns the hash into a MAC. The key must be between
+// zero and 32 bytes long.
+func NewXOF(size uint32, key []byte) (XOF, error) {
+ if len(key) > Size {
+ return nil, errKeySize
+ }
+ if size == magicUnknownOutputLength {
+ // 2^32-1 indicates an unknown number of bytes and thus isn't a
+ // valid length.
+ return nil, errors.New("blake2b: XOF length too large")
+ }
+ if size == OutputLengthUnknown {
+ size = magicUnknownOutputLength
+ }
+ x := &xof{
+ d: digest{
+ size: Size,
+ keyLen: len(key),
+ },
+ length: size,
+ }
+ copy(x.d.key[:], key)
+ x.Reset()
+ return x, nil
+}
+
+type xof struct {
+ d digest
+ length uint32
+ remaining uint64
+ cfg, root, block [Size]byte
+ offset int
+ nodeOffset uint32
+ readMode bool
+}
+
+func (x *xof) Write(p []byte) (n int, err error) {
+ if x.readMode {
+ panic("blake2b: write to XOF after read")
+ }
+ return x.d.Write(p)
+}
+
+func (x *xof) Clone() XOF {
+ clone := *x
+ return &clone
+}
+
+func (x *xof) Reset() {
+ x.cfg[0] = byte(Size)
+ binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
+ binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length
+ x.cfg[17] = byte(Size) // inner hash size
+
+ x.d.Reset()
+ x.d.h[1] ^= uint64(x.length) << 32
+
+ x.remaining = uint64(x.length)
+ if x.remaining == magicUnknownOutputLength {
+ x.remaining = maxOutputLength
+ }
+ x.offset, x.nodeOffset = 0, 0
+ x.readMode = false
+}
+
+func (x *xof) Read(p []byte) (n int, err error) {
+ if !x.readMode {
+ x.d.finalize(&x.root)
+ x.readMode = true
+ }
+
+ if x.remaining == 0 {
+ return 0, io.EOF
+ }
+
+ n = len(p)
+ if uint64(n) > x.remaining {
+ n = int(x.remaining)
+ p = p[:n]
+ }
+
+ if x.offset > 0 {
+ blockRemaining := Size - x.offset
+ if n < blockRemaining {
+ x.offset += copy(p, x.block[x.offset:])
+ x.remaining -= uint64(n)
+ return
+ }
+ copy(p, x.block[x.offset:])
+ p = p[blockRemaining:]
+ x.offset = 0
+ x.remaining -= uint64(blockRemaining)
+ }
+
+ for len(p) >= Size {
+ binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
+ x.nodeOffset++
+
+ x.d.initConfig(&x.cfg)
+ x.d.Write(x.root[:])
+ x.d.finalize(&x.block)
+
+ copy(p, x.block[:])
+ p = p[Size:]
+ x.remaining -= uint64(Size)
+ }
+
+ if todo := len(p); todo > 0 {
+ if x.remaining < uint64(Size) {
+ x.cfg[0] = byte(x.remaining)
+ }
+ binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
+ x.nodeOffset++
+
+ x.d.initConfig(&x.cfg)
+ x.d.Write(x.root[:])
+ x.d.finalize(&x.block)
+
+ x.offset = copy(p, x.block[:todo])
+ x.remaining -= uint64(todo)
+ }
+ return
+}
+
+func (d *digest) initConfig(cfg *[Size]byte) {
+ d.offset, d.c[0], d.c[1] = 0, 0, 0
+ for i := range d.h {
+ d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:])
+ }
+}
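In use, the XOF behaves like an io.Reader over a bounded output stream: writes absorb input until the first Read, after which Read squeezes out up to the configured number of bytes. A short usage sketch against the API defined above:

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// Request 32 output bytes; blake2b.OutputLengthUnknown would instead
	// allow streaming up to the 256 GiB limit. A nil key means no MAC.
	x, err := blake2b.NewXOF(32, nil)
	if err != nil {
		panic(err)
	}
	x.Write([]byte("hello world"))

	out := make([]byte, 32)
	if _, err := io.ReadFull(x, out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}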
diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go
new file mode 100644
index 000000000..efd689af4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/register.go
@@ -0,0 +1,32 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package blake2b
+
+import (
+ "crypto"
+ "hash"
+)
+
+func init() {
+ newHash256 := func() hash.Hash {
+ h, _ := New256(nil)
+ return h
+ }
+ newHash384 := func() hash.Hash {
+ h, _ := New384(nil)
+ return h
+ }
+
+ newHash512 := func() hash.Hash {
+ h, _ := New512(nil)
+ return h
+ }
+
+ crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
+ crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
+ crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
+}
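With this go1.9 registration in place, callers can reach the BLAKE2b hashes through the standard crypto package; a blank import of the package is enough to run init. A brief usage sketch:

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/blake2b" // registers BLAKE2b-256/384/512 (go1.9+)
)

func main() {
	h := crypto.BLAKE2b_512.New() // would panic if the hash were unregistered
	h.Write([]byte("hello world"))
	fmt.Printf("%x\n", h.Sum(nil))
}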