author    Christopher Speller <crspeller@gmail.com>    2017-07-20 15:22:49 -0700
committer GitHub <noreply@github.com>    2017-07-20 15:22:49 -0700
commit    58839cefb50e56ae5b157b37e9814ae83ceee70b (patch)
tree      5de966481678096fc9567f74f96673b34a65127c /vendor/github.com/minio
parent    e2f4492eadb5d3c58606b1fdd5774b63a07c236a (diff)
Upgrading server dependencies (#6984)
Diffstat (limited to 'vendor/github.com/minio')
-rw-r--r-- vendor/github.com/minio/go-homedir/LICENSE 21
-rw-r--r-- vendor/github.com/minio/go-homedir/README.md 16
-rw-r--r-- vendor/github.com/minio/go-homedir/dir_posix.go 64
-rw-r--r-- vendor/github.com/minio/go-homedir/dir_windows.go 28
-rw-r--r-- vendor/github.com/minio/go-homedir/homedir.go 68
-rw-r--r-- vendor/github.com/minio/go-homedir/homedir_test.go 114
-rw-r--r-- vendor/github.com/minio/minio-go/api-compose-object.go 532
-rw-r--r-- vendor/github.com/minio/minio-go/api-compose-object_test.go 88
-rw-r--r-- vendor/github.com/minio/minio-go/api-error-response.go 3
-rw-r--r-- vendor/github.com/minio/minio-go/api-get-object-file.go 6
-rw-r--r-- vendor/github.com/minio/minio-go/api-get-object.go 29
-rw-r--r-- vendor/github.com/minio/minio-go/api-get-policy.go 9
-rw-r--r-- vendor/github.com/minio/minio-go/api-list.go 48
-rw-r--r-- vendor/github.com/minio/minio-go/api-notification.go 6
-rw-r--r-- vendor/github.com/minio/minio-go/api-presigned.go 47
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-bucket.go 146
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-bucket_test.go 273
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-common.go 141
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-copy.go 56
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-encrypted.go 46
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-file.go 234
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-multipart.go 271
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-progress.go 201
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-readat.go 247
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object-streaming.go 436
-rw-r--r-- vendor/github.com/minio/minio-go/api-put-object.go 228
-rw-r--r-- vendor/github.com/minio/minio-go/api-remove.go 20
-rw-r--r-- vendor/github.com/minio/minio-go/api-s3-datatypes.go 10
-rw-r--r-- vendor/github.com/minio/minio-go/api-stat.go 21
-rw-r--r-- vendor/github.com/minio/minio-go/api.go 297
-rw-r--r-- vendor/github.com/minio/minio-go/api_functional_v2_test.go 643
-rw-r--r-- vendor/github.com/minio/minio-go/api_functional_v4_test.go 726
-rw-r--r-- vendor/github.com/minio/minio-go/api_unit_test.go 91
-rw-r--r-- vendor/github.com/minio/minio-go/appveyor.yml 2
-rw-r--r-- vendor/github.com/minio/minio-go/bucket-cache.go 60
-rw-r--r-- vendor/github.com/minio/minio-go/bucket-cache_test.go 49
-rw-r--r-- vendor/github.com/minio/minio-go/constants.go 14
-rw-r--r-- vendor/github.com/minio/minio-go/copy-conditions.go 99
-rw-r--r-- vendor/github.com/minio/minio-go/core.go 12
-rw-r--r-- vendor/github.com/minio/minio-go/core_test.go 42
-rw-r--r-- vendor/github.com/minio/minio-go/docs/API.md 215
-rw-r--r-- vendor/github.com/minio/minio-go/examples/s3/composeobject.go 74
-rw-r--r-- vendor/github.com/minio/minio-go/examples/s3/copyobject.go 18
-rw-r--r-- vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go 6
-rw-r--r-- vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go 5
-rw-r--r-- vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go 87
-rw-r--r-- vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go 5
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/chain.go 89
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go 137
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample 17
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/credentials.go 175
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample 12
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go 73
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/doc.go 45
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go 71
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go 62
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/env_test.go 105
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go 120
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go 129
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/file_test.go 189
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go 227
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go 180
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go 76
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/static.go 67
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/credentials/static_test.go 68
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go 9
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/encrypt/interface.go 3
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go 60
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go 6
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go 13
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go 8
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/s3utils/utils.go 93
-rw-r--r-- vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go 85
-rw-r--r-- vendor/github.com/minio/minio-go/request-headers.go 4
-rw-r--r-- vendor/github.com/minio/minio-go/s3-endpoints.go 1
-rw-r--r-- vendor/github.com/minio/minio-go/s3-error.go 2
-rw-r--r-- vendor/github.com/minio/minio-go/signature-type.go 45
-rw-r--r-- vendor/github.com/minio/minio-go/tempfile.go 60
-rw-r--r-- vendor/github.com/minio/minio-go/utils.go 105
-rw-r--r-- vendor/github.com/minio/minio-go/utils_test.go 96
80 files changed, 5401 insertions, 2885 deletions
diff --git a/vendor/github.com/minio/go-homedir/LICENSE b/vendor/github.com/minio/go-homedir/LICENSE
new file mode 100644
index 000000000..f9c841a51
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/minio/go-homedir/README.md b/vendor/github.com/minio/go-homedir/README.md
new file mode 100644
index 000000000..085f57775
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/README.md
@@ -0,0 +1,16 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: just call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package is not
+available on certain architectures such as i386 or PNaCl. Additionally,
+it has a cgo dependency on Darwin systems. This means that any Go code
+that uses that package cannot cross-compile. But 99% of the time the
+use for `os/user` is just to retrieve the home directory, which we can
+do for the current user without cgo. This library does that, enabling
+cross-compilation.
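For reference, a minimal sketch of the usage the README describes; the `~/.mc/config.json` path is only an illustrative input:

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/minio/go-homedir"
)

func main() {
	// Detect the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand a leading "~" into the home directory.
	path, err := homedir.Expand("~/.mc/config.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", path)
}
```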
diff --git a/vendor/github.com/minio/go-homedir/dir_posix.go b/vendor/github.com/minio/go-homedir/dir_posix.go
new file mode 100644
index 000000000..4615fe063
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/dir_posix.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+// Copyright 2016 (C) Mitchell Hashimoto
+// Distributed under the MIT License.
+
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "os/user"
+ "strconv"
+ "strings"
+)
+
+// dir returns the homedir of the current user for all POSIX-compatible
+// operating systems.
+func dir() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // user.Current is not implemented for i386 and PNaCL like environments.
+ if currUser, err := user.Current(); err == nil {
+ return currUser.HomeDir, nil
+ }
+
+ // If that fails, try getent
+ var stdout bytes.Buffer
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If "getent" is missing, ignore it
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd = exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/minio/go-homedir/dir_windows.go b/vendor/github.com/minio/go-homedir/dir_windows.go
new file mode 100644
index 000000000..85e5218c7
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/dir_windows.go
@@ -0,0 +1,28 @@
+// Copyright 2016 (C) Mitchell Hashimoto
+// Distributed under the MIT License.
+
+package homedir
+
+import (
+ "errors"
+ "os"
+)
+
+// dir returns the homedir of the current user on MS Windows.
+func dir() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ if home == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/minio/go-homedir/homedir.go b/vendor/github.com/minio/go-homedir/homedir.go
new file mode 100644
index 000000000..092373801
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/homedir.go
@@ -0,0 +1,68 @@
+// Copyright 2016 (C) Mitchell Hashimoto
+// Distributed under the MIT License.
+
+// Package homedir implements a portable function to determine the current user's homedir.
+package homedir
+
+import (
+ "errors"
+ "path/filepath"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.Mutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ // Return cached homedir if available.
+ if !DisableCache {
+ if homedirCache != "" {
+ return homedirCache, nil
+ }
+ }
+
+ // Determine the OS-specific current homedir.
+ result, err := dir()
+ if err != nil {
+ return "", err
+ }
+
+ // Cache for future lookups.
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
diff --git a/vendor/github.com/minio/go-homedir/homedir_test.go b/vendor/github.com/minio/go-homedir/homedir_test.go
new file mode 100644
index 000000000..a45121ff1
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/homedir_test.go
@@ -0,0 +1,114 @@
+package homedir
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "testing"
+)
+
+func patchEnv(key, value string) func() {
+ bck := os.Getenv(key)
+ deferFunc := func() {
+ os.Setenv(key, bck)
+ }
+
+ os.Setenv(key, value)
+ return deferFunc
+}
+
+func BenchmarkDir(b *testing.B) {
+ // We do this for any "warmups"
+ for i := 0; i < 10; i++ {
+ Dir()
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Dir()
+ }
+}
+
+func TestDir(t *testing.T) {
+ // NOTE: This test is not portable. If user.Current() worked
+ // everywhere, we wouldn't need our package in the first place.
+ u, err := user.Current()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if u.HomeDir != dir {
+ t.Fatalf("%#v != %#v", u.HomeDir, dir)
+ }
+}
+
+func TestExpand(t *testing.T) {
+ u, err := user.Current()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ cases := []struct {
+ Input string
+ Output string
+ Err bool
+ }{
+ {
+ "/foo",
+ "/foo",
+ false,
+ },
+
+ {
+ "~/foo",
+ filepath.Join(u.HomeDir, "foo"),
+ false,
+ },
+
+ {
+ "",
+ "",
+ false,
+ },
+
+ {
+ "~",
+ u.HomeDir,
+ false,
+ },
+
+ {
+ "~foo/foo",
+ "",
+ true,
+ },
+ }
+
+ for _, tc := range cases {
+ actual, err := Expand(tc.Input)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err)
+ }
+
+ if actual != tc.Output {
+ t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual)
+ }
+ }
+
+ DisableCache = true
+ defer func() { DisableCache = false }()
+ defer patchEnv("HOME", "/custom/path/")()
+ expected := filepath.Join("/", "custom", "path", "foo/bar")
+ actual, err := Expand("~/foo/bar")
+
+ if err != nil {
+ t.Errorf("No error is expected, got: %v", err)
+ } else if actual != expected {
+ t.Errorf("Expected: %v; actual: %v", expected, actual)
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
new file mode 100644
index 000000000..6baf09e84
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -0,0 +1,532 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// SSEInfo - represents Server-Side-Encryption parameters specified by
+// a user.
+type SSEInfo struct {
+ key []byte
+ algo string
+}
+
+// NewSSEInfo - specifies (binary or un-encoded) encryption key and
+// algorithm name. If algo is empty, it defaults to "AES256". Ref:
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+func NewSSEInfo(key []byte, algo string) SSEInfo {
+ if algo == "" {
+ algo = "AES256"
+ }
+ return SSEInfo{key, algo}
+}
+
+// internal method that computes SSE-C headers
+func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
+ if s == nil {
+ return nil
+ }
+
+ cs := ""
+ if isCopySource {
+ cs = "copy-source-"
+ }
+ return map[string]string{
+ "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
+ "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
+ "x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)),
+ }
+}
+
+// GetSSEHeaders - computes and returns headers for SSE-C as key-value
+// pairs. They can be set as metadata in PutObject* requests (for
+// encryption) or be set as request headers in `Core.GetObject` (for
+// decryption).
+func (s *SSEInfo) GetSSEHeaders() map[string]string {
+ return s.getSSEHeaders(false)
+}
+
+// DestinationInfo - type with information about the object to be
+// created via server-side copy requests, using the Compose API.
+type DestinationInfo struct {
+ bucket, object string
+
+ // key for encrypting destination
+ encryption *SSEInfo
+
+ // if no user-metadata is provided, it is copied from source
+ // (when there is only one source object in the compose
+ // request)
+ userMetadata map[string]string
+}
+
+// NewDestinationInfo - creates a compose-object/copy-source
+// destination info object.
+//
+// `encSSEC` is the key info for server-side-encryption with customer
+// provided key. If it is nil, no encryption is performed.
+//
+// `userMeta` is the user-metadata key-value pairs to be set on the
+// destination. The keys are automatically prefixed with `x-amz-meta-`
+// if needed. If nil is passed, and if only a single source (of any
+// size) is provided in the ComposeObject call, then metadata from the
+// source is copied to the destination.
+func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
+ userMeta map[string]string) (d DestinationInfo, err error) {
+
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucket); err != nil {
+ return d, err
+ }
+ if err = s3utils.CheckValidObjectName(object); err != nil {
+ return d, err
+ }
+
+ // Process custom-metadata to remove a `x-amz-meta-` prefix if
+ // present and validate that keys are distinct (after this
+ // prefix removal).
+ m := make(map[string]string)
+ for k, v := range userMeta {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ k = k[len("x-amz-meta-"):]
+ }
+ if _, ok := m[k]; ok {
+ return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
+ }
+ m[k] = v
+ }
+
+ return DestinationInfo{
+ bucket: bucket,
+ object: object,
+ encryption: encryptSSEC,
+ userMetadata: m,
+ }, nil
+}
+
+// getUserMetaHeadersMap - construct appropriate key-value pairs to send
+// as headers from metadata map to pass into copy-object request. For
+// single part copy-object (i.e. non-multipart object), enable the
+// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
+// `REPLACE`, so that metadata headers from the source are not copied
+// over.
+func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
+ if len(d.userMetadata) == 0 {
+ return nil
+ }
+ r := make(map[string]string)
+ if withCopyDirectiveHeader {
+ r["x-amz-metadata-directive"] = "REPLACE"
+ }
+ for k, v := range d.userMetadata {
+ r["x-amz-meta-"+k] = v
+ }
+ return r
+}
+
+// SourceInfo - represents a source object to be copied, using
+// server-side copying APIs.
+type SourceInfo struct {
+ bucket, object string
+
+ start, end int64
+
+ decryptKey *SSEInfo
+ // Headers to send with the upload-part-copy request involving
+ // this source object.
+ Headers http.Header
+}
+
+// NewSourceInfo - create a compose-object/copy-object source info
+// object.
+//
+// `decryptSSEC` is the decryption key using server-side-encryption
+// with customer provided key. It may be nil if the source is not
+// encrypted.
+func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
+ r := SourceInfo{
+ bucket: bucket,
+ object: object,
+ start: -1, // range is unspecified by default
+ decryptKey: decryptSSEC,
+ Headers: make(http.Header),
+ }
+
+ // Set the source header
+ r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
+
+ // Assemble decryption headers for upload-part-copy request
+ for k, v := range decryptSSEC.getSSEHeaders(true) {
+ r.Headers.Set(k, v)
+ }
+
+ return r
+}
+
+// SetRange - Set the start and end offset of the source object to be
+// copied. If this method is not called, the whole source object is
+// copied.
+func (s *SourceInfo) SetRange(start, end int64) error {
+ if start > end || start < 0 {
+ return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
+ }
+ // Note that 0 <= start <= end
+ s.start, s.end = start, end
+ return nil
+}
+
+// SetMatchETagCond - Set ETag match condition. The object is copied
+// only if the etag of the source matches the value given here.
+func (s *SourceInfo) SetMatchETagCond(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-match", etag)
+ return nil
+}
+
+// SetMatchETagExceptCond - Set the ETag match exception
+// condition. The object is copied only if the etag of the source is
+// not the value given here.
+func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-none-match", etag)
+ return nil
+}
+
+// SetModifiedSinceCond - Set the modified since condition.
+func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Input time cannot be 0.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetUnmodifiedSinceCond - Set the unmodified since condition.
+func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Input time cannot be 0.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// Helper to fetch size and etag of an object using a StatObject call.
+func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
+ // Get object info - need size and etag here. Also, decryption
+ // headers are added to the stat request if given.
+ var objInfo ObjectInfo
+ rh := NewGetReqHeaders()
+ for k, v := range s.decryptKey.getSSEHeaders(false) {
+ rh.Set(k, v)
+ }
+ objInfo, err = c.statObject(s.bucket, s.object, rh)
+ if err != nil {
+ err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
+ } else {
+ size = objInfo.Size
+ etag = objInfo.ETag
+ userMeta = make(map[string]string)
+ for k, v := range objInfo.Metadata {
+ if strings.HasPrefix(k, "x-amz-meta-") {
+ if len(v) > 0 {
+ userMeta[k] = v[0]
+ }
+ }
+ }
+ }
+ return
+}
+
+// uploadPartCopy - helper function to create a part in a multipart
+// upload via an upload-part-copy request
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
+func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
+ headers http.Header) (p CompletePart, err error) {
+
+ // Build query parameters
+ urlValues := make(url.Values)
+ urlValues.Set("partNumber", strconv.Itoa(partNumber))
+ urlValues.Set("uploadId", uploadID)
+
+ // Send upload-part-copy request
+ resp, err := c.executeMethod("PUT", requestMetadata{
+ bucketName: bucket,
+ objectName: object,
+ customHeader: headers,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return p, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, bucket, object)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
+ return p, nil
+}
+
+// ComposeObject - creates an object using server-side copying of
+// existing objects. It takes a list of source objects (with optional
+// offsets) and concatenates them into a new object using only
+// server-side copying operations.
+func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
+ if len(srcs) < 1 || len(srcs) > maxPartsCount {
+ return ErrInvalidArgument("There must be as least one and upto 10000 source objects.")
+ }
+
+ srcSizes := make([]int64, len(srcs))
+ var totalSize, size, totalParts int64
+ var srcUserMeta map[string]string
+ var etag string
+ var err error
+ for i, src := range srcs {
+ size, etag, srcUserMeta, err = src.getProps(c)
+ if err != nil {
+ return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
+ }
+
+ // Error out if client side encryption is used in this source object when
+ // more than one source objects are given.
+ if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
+ return ErrInvalidArgument(
+ fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
+ }
+
+ // Since we did a HEAD to get size, we use the ETag
+ // value to make sure the object has not changed by
+ // the time we perform the copy. This is done only if
+ // the user has not set their own ETag match
+ // condition.
+ if src.Headers.Get("x-amz-copy-source-if-match") == "" {
+ src.SetMatchETagCond(etag)
+ }
+
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if src.start != -1 {
+ // Since range is specified,
+ // 0 <= src.start <= src.end
+ // so only invalid case to check is:
+ if src.end >= size {
+ return ErrInvalidArgument(
+ fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
+ i, src.start, src.end, size))
+ }
+ size = src.end - src.start + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if size < absMinPartSize && i < len(srcs)-1 {
+ return ErrInvalidArgument(
+ fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
+ }
+
+ // Is data to copy too large?
+ totalSize += size
+ if totalSize > maxMultipartPutObjectSize {
+ return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ }
+
+ // record source size
+ srcSizes[i] = size
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(size)
+ // Do we need more parts than we are allowed?
+ if totalParts > maxPartsCount {
+ return ErrInvalidArgument(fmt.Sprintf(
+ "Your proposed compose object requires more than %d parts", maxPartsCount))
+ }
+ }
+
+ // Single source object case (i.e. when only one source is
+ // involved, it is being copied wholly and at most 5GiB in
+ // size).
+ if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize {
+ h := srcs[0].Headers
+ // Add destination encryption headers
+ for k, v := range dst.encryption.getSSEHeaders(false) {
+ h.Set(k, v)
+ }
+
+ // If no user metadata is specified (and so, the
+ // for-loop below is not entered), metadata from the
+ // source is copied to the destination (due to
+ // single-part copy-object PUT request behaviour).
+ for k, v := range dst.getUserMetaHeadersMap(true) {
+ h.Set(k, v)
+ }
+
+ // Send copy request
+ resp, err := c.executeMethod("PUT", requestMetadata{
+ bucketName: dst.bucket,
+ objectName: dst.object,
+ customHeader: h,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, dst.bucket, dst.object)
+ }
+
+ // Return nil on success.
+ return nil
+ }
+
+ // Now, handle multipart-copy cases.
+
+ // 1. Initiate a new multipart upload.
+
+ // Set user-metadata on the destination object. If no
+ // user-metadata is specified and there is only one source,
+ // then (and only then) metadata from the source is copied.
+ userMeta := dst.getUserMetaHeadersMap(false)
+ metaMap := userMeta
+ if len(userMeta) == 0 && len(srcs) == 1 {
+ metaMap = srcUserMeta
+ }
+ metaHeaders := make(map[string][]string)
+ for k, v := range metaMap {
+ metaHeaders[k] = append(metaHeaders[k], v)
+ }
+ uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
+ if err != nil {
+ return fmt.Errorf("Error creating new upload: %v", err)
+ }
+
+ // 2. Perform copy part uploads
+ objParts := []CompletePart{}
+ partIndex := 1
+ for i, src := range srcs {
+ h := src.Headers
+ // Add destination encryption headers
+ for k, v := range dst.encryption.getSSEHeaders(false) {
+ h.Set(k, v)
+ }
+
+ // calculate start/end indices of parts after
+ // splitting.
+ startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
+ for j, start := range startIdx {
+ end := endIdx[j]
+
+ // Add (or reset) source range header for
+ // upload part copy request.
+ h.Set("x-amz-copy-source-range",
+ fmt.Sprintf("bytes=%d-%d", start, end))
+
+ // make upload-part-copy request
+ complPart, err := c.uploadPartCopy(dst.bucket,
+ dst.object, uploadID, partIndex, h)
+ if err != nil {
+ return fmt.Errorf("Error in upload-part-copy - %v", err)
+ }
+ objParts = append(objParts, complPart)
+ partIndex++
+ }
+ }
+
+ // 3. Make final complete-multipart request.
+ _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
+ completeMultipartUpload{Parts: objParts})
+ if err != nil {
+ err = fmt.Errorf("Error in complete-multipart request - %v", err)
+ }
+ return err
+}
+
+// partsRequired is ceiling(size / copyPartSize)
+func partsRequired(size int64) int64 {
+ r := size / copyPartSize
+ if size%copyPartSize > 0 {
+ r++
+ }
+ return r
+}
+
+// calculateEvenSplits - computes splits for a source and returns
+// start and end index slices. Splits happen evenly to be sure that no
+// part is less than 5MiB, as that could fail the multipart request if
+// it is not the last part.
+func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
+ if size == 0 {
+ return
+ }
+
+ reqParts := partsRequired(size)
+ startIndex = make([]int64, reqParts)
+ endIndex = make([]int64, reqParts)
+ // Compute number of required parts `k`, as:
+ //
+ // k = ceiling(size / copyPartSize)
+ //
+ // Now, distribute the `size` bytes in the source into
+ // k parts as evenly as possible:
+ //
+ // r parts sized (q+1) bytes, and
+ // (k - r) parts sized q bytes, where
+ //
+ // size = q * k + r (by simple division of size by k,
+ // so that 0 <= r < k)
+ //
+ start := src.start
+ if start == -1 {
+ start = 0
+ }
+ quot, rem := size/reqParts, size%reqParts
+ nextStart := start
+ for j := int64(0); j < reqParts; j++ {
+ curPartSize := quot
+ if j < rem {
+ curPartSize++
+ }
+
+ cStart := nextStart
+ cEnd := cStart + curPartSize - 1
+ nextStart = cEnd + 1
+
+ startIndex[j], endIndex[j] = cStart, cEnd
+ }
+ return
+}
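The examples/s3/composeobject.go touched by this commit exercises this API; a condensed, self-contained sketch (endpoint, credentials, and bucket/object names are placeholders, and `minio.New` is assumed as the client constructor for this library version):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Two unencrypted sources; copy only the first 10 MiB of the second.
	src1 := minio.NewSourceInfo("srcbucket", "part-a", nil)
	src2 := minio.NewSourceInfo("srcbucket", "part-b", nil)
	if err := src2.SetRange(0, 10*1024*1024-1); err != nil {
		log.Fatal(err)
	}

	// Destination with no SSE-C key and no custom user-metadata.
	dst, err := minio.NewDestinationInfo("dstbucket", "joined-object", nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Concatenate the sources into dst using only server-side copying.
	if err := c.ComposeObject(dst, []minio.SourceInfo{src1, src2}); err != nil {
		log.Fatal(err)
	}
}
```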
diff --git a/vendor/github.com/minio/minio-go/api-compose-object_test.go b/vendor/github.com/minio/minio-go/api-compose-object_test.go
new file mode 100644
index 000000000..5339d2027
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-compose-object_test.go
@@ -0,0 +1,88 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package minio
+
+import (
+ "reflect"
+ "testing"
+)
+
+const (
+ gb1 = 1024 * 1024 * 1024
+ gb5 = 5 * gb1
+ gb5p1 = gb5 + 1
+ gb10p1 = 2*gb5 + 1
+ gb10p2 = 2*gb5 + 2
+)
+
+func TestPartsRequired(t *testing.T) {
+ testCases := []struct {
+ size, ref int64
+ }{
+ {0, 0},
+ {1, 1},
+ {gb5, 1},
+ {2 * gb5, 2},
+ {gb10p1, 3},
+ {gb10p2, 3},
+ }
+
+ for i, testCase := range testCases {
+ res := partsRequired(testCase.size)
+ if res != testCase.ref {
+ t.Errorf("Test %d - output did not match with reference results", i+1)
+ }
+ }
+}
+
+func TestCalculateEvenSplits(t *testing.T) {
+
+ testCases := []struct {
+ // input size and source object
+ size int64
+ src SourceInfo
+
+ // output part-indexes
+ starts, ends []int64
+ }{
+ {0, SourceInfo{start: -1}, nil, nil},
+ {1, SourceInfo{start: -1}, []int64{0}, []int64{0}},
+ {1, SourceInfo{start: 0}, []int64{0}, []int64{0}},
+
+ {gb1, SourceInfo{start: -1}, []int64{0}, []int64{gb1 - 1}},
+ {gb5, SourceInfo{start: -1}, []int64{0}, []int64{gb5 - 1}},
+
+ // 2 part splits
+ {gb5p1, SourceInfo{start: -1}, []int64{0, gb5/2 + 1}, []int64{gb5 / 2, gb5}},
+ {gb5p1, SourceInfo{start: -1}, []int64{0, gb5/2 + 1}, []int64{gb5 / 2, gb5}},
+
+ // 3 part splits
+ {gb10p1, SourceInfo{start: -1},
+ []int64{0, gb10p1/3 + 1, 2*gb10p1/3 + 1},
+ []int64{gb10p1 / 3, 2 * gb10p1 / 3, gb10p1 - 1}},
+
+ {gb10p2, SourceInfo{start: -1},
+ []int64{0, gb10p2 / 3, 2 * gb10p2 / 3},
+ []int64{gb10p2/3 - 1, 2*gb10p2/3 - 1, gb10p2 - 1}},
+ }
+
+ for i, testCase := range testCases {
+ resStart, resEnd := calculateEvenSplits(testCase.size, testCase.src)
+ if !reflect.DeepEqual(testCase.starts, resStart) || !reflect.DeepEqual(testCase.ends, resEnd) {
+ t.Errorf("Test %d - output did not match with reference results", i+1)
+ }
+ }
+}
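To make the split arithmetic concrete, take the gb10p1 case above. The {gb5, 1} and {gb10p1, 3} expectations together pin copyPartSize at 5 GiB, so for size = 10*2^30 + 1 = 10737418241 bytes, partsRequired gives k = 3. Integer division yields q = 3579139413 with remainder r = 2, so calculateEvenSplits produces two parts of q+1 bytes followed by one part of q bytes, which is exactly the three [start, end] ranges the test expects.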
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
index ff8b8b109..e0019a334 100644
--- a/vendor/github.com/minio/minio-go/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/api-error-response.go
@@ -161,6 +161,9 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
if errResp.Region == "" {
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
+ if errResp.Code == "InvalidRegion" && errResp.Region != "" {
+ errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
+ }
// Save headers returned in the API XML error
errResp.Headers = resp.Header
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
index 477a0969f..c4193e934 100644
--- a/vendor/github.com/minio/minio-go/api-get-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -20,15 +20,17 @@ import (
"io"
"os"
"path/filepath"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// FGetObject - download contents of an object to a local file.
func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index 2abd4608e..1078d2f98 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -26,11 +26,12 @@ import (
"time"
"github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/s3utils"
)
-// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels
-func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) {
-
+// GetEncryptedObject deciphers and streams data stored in the server after applying the specified encryption
+// materials. The returned stream should be closed by the caller.
+func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
if encryptMaterials == nil {
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
@@ -57,10 +58,10 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria
// GetObject - returns a seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
@@ -328,14 +329,14 @@ func (o *Object) setOffset(bytesRead int64) error {
// Update the currentOffset.
o.currOffset += bytesRead
- if o.currOffset >= o.objectInfo.Size {
+ if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
return io.EOF
}
return nil
}
// Read reads up to len(b) bytes into b. It returns the number of
-// bytes read (0 <= n <= len(p)) and any error encountered. Returns
+// bytes read (0 <= n <= len(b)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
if o == nil {
@@ -442,7 +443,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
if o.objectInfoSet {
// If offset is negative, we return io.EOF.
// If offset is greater than or equal to object size we return io.EOF.
- if offset >= o.objectInfo.Size || offset < 0 {
+ if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
return 0, io.EOF
}
}
@@ -542,16 +543,20 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
default:
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
case 0:
- if offset > o.objectInfo.Size {
+ if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset = offset
case 1:
- if o.currOffset+offset > o.objectInfo.Size {
+ if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset += offset
case 2:
+ // If we don't know the object size return an error for io.SeekEnd
+ if o.objectInfo.Size < 0 {
+ return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
+ }
// Seeking to positive offset is valid for whence '2', but
// since we are backing a Reader we have reached 'EOF' if
// offset is positive.
@@ -623,10 +628,10 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, err
}
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
index 7491df330..10ccdc66b 100644
--- a/vendor/github.com/minio/minio-go/api-get-policy.go
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -23,15 +23,16 @@ import (
"net/url"
"github.com/minio/minio-go/pkg/policy"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketPolicy - get bucket policy at a given path.
func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return policy.BucketPolicyNone, err
}
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return policy.BucketPolicyNone, err
}
policyInfo, err := c.getBucketPolicy(bucketName)
@@ -48,10 +49,10 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
// ListBucketPolicies - list all policies for a given prefix and all its children.
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return map[string]policy.BucketPolicy{}, err
}
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return map[string]policy.BucketPolicy{}, err
}
policyInfo, err := c.getBucketPolicy(bucketName)
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
index 6a228179e..6de1fe9b3 100644
--- a/vendor/github.com/minio/minio-go/api-list.go
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -17,10 +17,13 @@
package minio
import (
+ "errors"
"fmt"
"net/http"
"net/url"
"strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// ListBuckets list all buckets owned by this authenticated user.
@@ -69,7 +72,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
-// // Recurively list all objects in 'mytestbucket'
+// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
@@ -84,18 +87,21 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// If recursive we do not delimit.
delimiter = ""
}
+
// Return object owner information by default
fetchOwner := true
+
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
+
// Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -120,7 +126,6 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// If contents are available loop through and send over channel.
for _, object := range result.Contents {
- // Save the marker.
select {
// Send object content.
case objectStatCh <- object:
@@ -133,12 +138,12 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
- object := ObjectInfo{}
- object.Key = obj.Prefix
- object.Size = 0
select {
// Send object prefixes.
- case objectStatCh <- object:
+ case objectStatCh <- ObjectInfo{
+ Key: obj.Prefix,
+ Size: 0,
+ }:
// If receives done from the caller, return here.
case <-doneCh:
return
@@ -170,11 +175,11 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
}
// Validate object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return ListBucketV2Result{}, err
}
// Get resources properly escaped and lined up before
@@ -227,10 +232,17 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
// Decode listBuckets XML.
listBucketResult := ListBucketV2Result{}
- err = xmlDecoder(resp.Body, &listBucketResult)
- if err != nil {
+ if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
return listBucketResult, err
}
+
+ // This is an additional verification check to make
+ // sure proper responses are received.
+ if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
+ return listBucketResult, errors.New("Truncated response should have continuation token set")
+ }
+
+ // Success.
return listBucketResult, nil
}
@@ -266,7 +278,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
delimiter = ""
}
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -274,7 +286,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
return objectStatCh
}
// Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -350,11 +362,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketResult{}, err
}
// Validate object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return ListBucketResult{}, err
}
// Get resources properly escaped and lined up before
@@ -442,7 +454,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
delimiter = ""
}
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
@@ -450,7 +462,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
return objectMultipartStatCh
}
// Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
index cbea1c6da..25a283af5 100644
--- a/vendor/github.com/minio/minio-go/api-notification.go
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -30,7 +30,7 @@ import (
// GetBucketNotification - get bucket notification at a given path.
func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return BucketNotification{}, err
}
notification, err := c.getBucketNotification(bucketName)
@@ -140,7 +140,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
defer close(notificationInfoCh)
// Validate the bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
@@ -155,7 +155,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
return
}
- // Continously run and listen on bucket notification.
+ // Continuously run and listen on bucket notification.
// Create a done channel to control 'ListObjects' go routine.
retryDoneCh := make(chan struct{}, 1)
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index f9d05ab9b..8cfcb55fb 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -42,10 +42,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.")
}
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
if err := isValidExpiry(expires); err != nil {
@@ -122,21 +122,38 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
+ // Get credentials from the configured credentials provider.
+ credValues, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ signerType = credValues.SignerType
+ sessionToken = credValues.SessionToken
+ accessKeyID = credValues.AccessKeyID
+ secretAccessKey = credValues.SecretAccessKey
+ )
+
+ if signerType.IsAnonymous() {
+ return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
+ }
+
// Keep time.
t := time.Now().UTC()
// For signature version '2' handle here.
- if c.signature.isV2() {
+ if signerType.IsV2() {
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
- p.formData["GoogleAccessId"] = c.accessKeyID
+ p.formData["GoogleAccessId"] = accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
- p.formData["AWSAccessKeyId"] = c.accessKeyID
+ p.formData["AWSAccessKeyId"] = accessKeyID
}
// Sign the policy.
- p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
+ p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
return u, p.formData, nil
}
@@ -159,7 +176,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
}
// Add a credential policy.
- credential := s3signer.GetCredential(c.accessKeyID, location, t)
+ credential := s3signer.GetCredential(accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
@@ -168,13 +185,27 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
+ if sessionToken != "" {
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-security-token",
+ value: sessionToken,
+ }); err != nil {
+ return nil, nil, err
+ }
+ }
+
// Get base64 encoded policy.
policyBase64 := p.base64()
+
// Fill in the form data.
p.formData["policy"] = policyBase64
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
- p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+ if sessionToken != "" {
+ p.formData["x-amz-security-token"] = sessionToken
+ }
+ p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
return u, p.formData, nil
}
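A sketch of the presigned POST policy flow after this change (endpoint, credentials, names, and expiry are placeholders; the PostPolicy setters are assumed per this library version). With temporary credentials, the returned form data now also includes the x-amz-security-token field set above:

```go
package main

import (
	"fmt"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	p := minio.NewPostPolicy()
	if err := p.SetBucket("mybucket"); err != nil {
		log.Fatal(err)
	}
	if err := p.SetKey("uploads/photo.jpg"); err != nil {
		log.Fatal(err)
	}
	if err := p.SetExpires(time.Now().UTC().Add(24 * time.Hour)); err != nil {
		log.Fatal(err)
	}

	u, formData, err := c.PresignedPostPolicy(p)
	if err != nil {
		log.Fatal(err)
	}
	// formData carries policy, x-amz-credential, x-amz-signature, etc.,
	// and, with temporary credentials, the new x-amz-security-token.
	fmt.Println("POST to:", u, "with fields:", formData)
}
```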
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index 001da6de3..fd37dc192 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,18 +19,14 @@ package minio
import (
"bytes"
- "encoding/base64"
- "encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
- "io/ioutil"
"net/http"
"net/url"
- "path"
"github.com/minio/minio-go/pkg/policy"
- "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
/// Bucket operations
@@ -50,95 +47,23 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
}()
// Validate the input arguments.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
return err
}
// If location is empty, treat it as the default region 'us-east-1'.
if location == "" {
location = "us-east-1"
- }
-
- // Try creating bucket with the provided region, in case of
- // invalid region error let's guess the appropriate region
- // from S3 API headers
-
- // Create a done channel to control 'newRetryTimer' go routine.
- doneCh := make(chan struct{}, 1)
-
- // Indicate to our routine to exit cleanly upon return.
- defer close(doneCh)
-
- // Blank indentifier is kept here on purpose since 'range' without
- // blank identifiers is only supported since go1.4
- // https://golang.org/doc/go1.4#forrange.
- for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
- // Initialize the makeBucket request.
- req, err := c.makeBucketRequest(bucketName, location)
- if err != nil {
- return err
- }
-
- // Execute make bucket request.
- resp, err := c.do(req)
- defer closeResponse(resp)
- if err != nil {
- return err
+ // For custom region clients, default
+ // to the custom region instead of 'us-east-1'.
+ if c.region != "" {
+ location = c.region
}
-
- if resp.StatusCode != http.StatusOK {
- err := httpRespToErrorResponse(resp, bucketName, "")
- errResp := ToErrorResponse(err)
- if errResp.Code == "InvalidRegion" && errResp.Region != "" {
- // Fetch bucket region found in headers
- // of S3 error response, attempt bucket
- // create again.
- location = errResp.Region
- continue
- }
- // Nothing to retry, fail.
- return err
- }
-
- // Control reaches here when bucket create was successful,
- // break out.
- break
- }
-
- // Success.
- return nil
-}
-
-// Low level wrapper API For makeBucketRequest.
-func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
- // Validate input arguments.
- if err := isValidBucketName(bucketName); err != nil {
- return nil, err
}
-
- // In case of Amazon S3. The make bucket issued on
- // already existing bucket would fail with
- // 'AuthorizationMalformed' error if virtual style is
- // used. So we default to 'path style' as that is the
- // preferred method here. The final location of the
- // 'bucket' is provided through XML LocationConstraint
- // data with the request.
- targetURL := c.endpointURL
- targetURL.Path = path.Join(bucketName, "") + "/"
-
- // get a new HTTP request for the method.
- req, err := http.NewRequest("PUT", targetURL.String(), nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent for the request.
- c.setUserAgent(req)
-
- // set sha256 sum for signature calculation only with
- // signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ // PUT bucket request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ bucketLocation: location,
}
// If location is not 'us-east-1' create bucket location config.
@@ -148,30 +73,29 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
var createBucketConfigBytes []byte
createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
if err != nil {
- return nil, err
- }
- createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
- req.Body = ioutil.NopCloser(createBucketConfigBuffer)
- req.ContentLength = int64(len(createBucketConfigBytes))
- // Set content-md5.
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
- if c.signature.isV4() {
- // Set sha256.
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
+ return err
}
+ reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
+ reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
+ reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
+ reqMetadata.contentLength = int64(len(createBucketConfigBytes))
}
- // Sign the request.
- if c.signature.isV4() {
- // Signature calculated for MakeBucket request should be for 'us-east-1',
- // regardless of the bucket's location constraint.
- req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
+ // Execute PUT to create a new bucket.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
}
- // Return signed request.
- return req, nil
+ // Success.
+ return nil
}
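
For reference, a minimal call against the rewritten bucket-creation path above (MakeBucket in this file) might look like the sketch below. Endpoint, credentials, and names are placeholders; an empty location now falls back to 'us-east-1' or the client's configured region, with no retry loop.

    package main

    import (
        "log"

        minio "github.com/minio/minio-go"
    )

    func main() {
        // Placeholder endpoint and credentials.
        client, err := minio.New("play.minio.io:9000", "my-access-key", "my-secret-key", true)
        if err != nil {
            log.Fatalln(err)
        }
        // Create the bucket in an explicit region; "" would fall back
        // to "us-east-1" (or the client's custom region, per above).
        if err := client.MakeBucket("my-bucket", "eu-central-1"); err != nil {
            log.Fatalln(err)
        }
    }
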
// SetBucketPolicy sets the access permissions on an existing bucket.
@@ -184,10 +108,10 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// writeonly - anonymous put/delete access to a given object prefix.
func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return err
}
@@ -216,7 +140,7 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
// Saves a new bucket policy.
func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
@@ -262,7 +186,7 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
// Removes all policies on a bucket.
func (c Client) removeBucketPolicy(bucketName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
@@ -286,7 +210,7 @@ func (c Client) removeBucketPolicy(bucketName string) error {
// SetBucketNotification saves a new bucket notification.
func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
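
The policy entry points above now validate inputs through pkg/s3utils and take a policy.BucketPolicy constant. A short usage sketch (placeholder names; assumes the vendored policy package this file imports):

    package main

    import (
        "log"

        minio "github.com/minio/minio-go"
        "github.com/minio/minio-go/pkg/policy"
    )

    func main() {
        client, err := minio.New("play.minio.io:9000", "my-access-key", "my-secret-key", true)
        if err != nil {
            log.Fatalln(err)
        }
        // Grant anonymous read-only access to objects under "downloads".
        err = client.SetBucketPolicy("my-bucket", "downloads", policy.BucketPolicyReadOnly)
        if err != nil {
            log.Fatalln(err)
        }
    }
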
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket_test.go b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
deleted file mode 100644
index ec33c8492..000000000
--- a/vendor/github.com/minio/minio-go/api-put-bucket_test.go
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "encoding/xml"
- "io"
- "io/ioutil"
- "net/http"
- "path"
- "testing"
-
- "github.com/minio/minio-go/pkg/s3signer"
-)
-
-// Tests validate http request formulated for creation of bucket.
-func TestMakeBucketRequest(t *testing.T) {
- // Generates expected http request for bucket creation.
- // Used for asserting with the actual request generated.
- createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
- targetURL := c.endpointURL
- targetURL.Path = path.Join(bucketName, "") + "/"
-
- // get a new HTTP request for the method.
- var err error
- req, err = http.NewRequest("PUT", targetURL.String(), nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent for the request.
- c.setUserAgent(req)
-
- // set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
- }
-
- // If location is not 'us-east-1' create bucket location config.
- if location != "us-east-1" && location != "" {
- createBucketConfig := createBucketConfiguration{}
- createBucketConfig.Location = location
- var createBucketConfigBytes []byte
- createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
- if err != nil {
- return nil, err
- }
- createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
- req.Body = ioutil.NopCloser(createBucketConfigBuffer)
- req.ContentLength = int64(len(createBucketConfigBytes))
- // Set content-md5.
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
- if c.signature.isV4() {
- // Set sha256.
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
- }
- }
-
- // Sign the request.
- if c.signature.isV4() {
- // Signature calculated for MakeBucket request should be for 'us-east-1',
- // regardless of the bucket's location constraint.
- req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
- }
-
- // Return signed request.
- return req, nil
- }
-
- // Get Request body.
- getReqBody := func(reqBody io.ReadCloser) (string, error) {
- contents, err := ioutil.ReadAll(reqBody)
- if err != nil {
- return "", err
- }
- return string(contents), nil
- }
-
- // Info for 'Client' creation.
- // Will be used as arguments for 'NewClient'.
- type infoForClient struct {
- endPoint string
- accessKey string
- secretKey string
- enableInsecure bool
- }
- // dataset for 'NewClient' call.
- info := []infoForClient{
- // endpoint localhost.
- // both access-key and secret-key are empty.
- {"localhost:9000", "", "", false},
-		// both access-key and secret-key exist.
- {"localhost:9000", "my-access-key", "my-secret-key", false},
-		// one of access-key and secret-key is empty.
- {"localhost:9000", "", "my-secret-key", false},
-
- // endpoint amazon s3.
- {"s3.amazonaws.com", "", "", false},
- {"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
- {"s3.amazonaws.com", "my-acess-key", "", false},
-
- // endpoint google cloud storage.
- {"storage.googleapis.com", "", "", false},
- {"storage.googleapis.com", "my-access-key", "my-secret-key", false},
- {"storage.googleapis.com", "", "my-secret-key", false},
-
- // endpoint custom domain running Minio server.
- {"play.minio.io", "", "", false},
- {"play.minio.io", "my-access-key", "my-secret-key", false},
- {"play.minio.io", "my-acess-key", "", false},
- }
-
- testCases := []struct {
- bucketName string
- location string
- // data for new client creation.
- info infoForClient
- // error in the output.
- err error
- // flag indicating whether tests should pass.
- shouldPass bool
- }{
- // Test cases with Invalid bucket name.
- {".mybucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
- {"mybucket.", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
- {"mybucket-", "", infoForClient{}, ErrInvalidBucketName("Bucket name contains invalid characters."), false},
- {"my", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
- {"", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be empty."), false},
- {"my..bucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
-
- // Test case with all valid values for S3 bucket location.
- // Client is constructed using the info struct.
- // case with empty location.
- {"my-bucket", "", info[0], nil, true},
- // case with location set to standard 'us-east-1'.
- {"my-bucket", "us-east-1", info[0], nil, true},
- // case with location set to a value different from 'us-east-1'.
- {"my-bucket", "eu-central-1", info[0], nil, true},
-
- {"my-bucket", "", info[1], nil, true},
- {"my-bucket", "us-east-1", info[1], nil, true},
- {"my-bucket", "eu-central-1", info[1], nil, true},
-
- {"my-bucket", "", info[2], nil, true},
- {"my-bucket", "us-east-1", info[2], nil, true},
- {"my-bucket", "eu-central-1", info[2], nil, true},
-
- {"my-bucket", "", info[3], nil, true},
- {"my-bucket", "us-east-1", info[3], nil, true},
- {"my-bucket", "eu-central-1", info[3], nil, true},
-
- {"my-bucket", "", info[4], nil, true},
- {"my-bucket", "us-east-1", info[4], nil, true},
- {"my-bucket", "eu-central-1", info[4], nil, true},
-
- {"my-bucket", "", info[5], nil, true},
- {"my-bucket", "us-east-1", info[5], nil, true},
- {"my-bucket", "eu-central-1", info[5], nil, true},
-
- {"my-bucket", "", info[6], nil, true},
- {"my-bucket", "us-east-1", info[6], nil, true},
- {"my-bucket", "eu-central-1", info[6], nil, true},
-
- {"my-bucket", "", info[7], nil, true},
- {"my-bucket", "us-east-1", info[7], nil, true},
- {"my-bucket", "eu-central-1", info[7], nil, true},
-
- {"my-bucket", "", info[8], nil, true},
- {"my-bucket", "us-east-1", info[8], nil, true},
- {"my-bucket", "eu-central-1", info[8], nil, true},
-
- {"my-bucket", "", info[9], nil, true},
- {"my-bucket", "us-east-1", info[9], nil, true},
- {"my-bucket", "eu-central-1", info[9], nil, true},
-
- {"my-bucket", "", info[10], nil, true},
- {"my-bucket", "us-east-1", info[10], nil, true},
- {"my-bucket", "eu-central-1", info[10], nil, true},
-
- {"my-bucket", "", info[11], nil, true},
- {"my-bucket", "us-east-1", info[11], nil, true},
- {"my-bucket", "eu-central-1", info[11], nil, true},
- }
-
- for i, testCase := range testCases {
-		// cannot create a new client with an empty endPoint value.
- // validates and creates a new client only if the endPoint value is not empty.
- client := &Client{}
- var err error
- if testCase.info.endPoint != "" {
-
- client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
- if err != nil {
- t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
- }
- }
-
- actualReq, err := client.makeBucketRequest(testCase.bucketName, testCase.location)
- if err != nil && testCase.shouldPass {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
- }
- if err == nil && !testCase.shouldPass {
- t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
- }
- // Failed as expected, but does it fail for the expected reason.
- if err != nil && !testCase.shouldPass {
- if err.Error() != testCase.err.Error() {
- t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
- }
- }
-
- // Test passes as expected, but the output values are verified for correctness here.
- if err == nil && testCase.shouldPass {
- expectedReq := &http.Request{}
- expectedReq, err = createExpectedRequest(client, testCase.bucketName, testCase.location, expectedReq)
- if err != nil {
- t.Fatalf("Test %d: Expected request Creation failed", i+1)
- }
- if expectedReq.Method != actualReq.Method {
- t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
- }
- if expectedReq.URL.String() != actualReq.URL.String() {
- t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
- }
- if expectedReq.ContentLength != actualReq.ContentLength {
- t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
- }
-
- if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
- t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match with that of the actual request", i+1)
- }
- if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
- t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\",but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
- }
-
- if testCase.location != "us-east-1" && testCase.location != "" {
- expectedContent, err := getReqBody(expectedReq.Body)
- if err != nil {
- t.Fatalf("Test %d: Coudln't parse request body", i+1)
- }
- actualContent, err := getReqBody(actualReq.Body)
- if err != nil {
- t.Fatalf("Test %d: Coudln't parse request body", i+1)
- }
- if expectedContent != actualContent {
- t.Errorf("Test %d: Expected request body doesn't match actual content body", i+1)
- }
- if expectedReq.Header.Get("Content-Md5") != actualReq.Header.Get("Content-Md5") {
- t.Errorf("Test %d: Request body Md5 differs from the expected result", i+1)
- }
- }
- }
- }
-}
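
The deleted table-driven test above exercised bucket-name validation, which now lives in pkg/s3utils. A quick sketch of the strict checker that the new code calls (names are placeholders):

    package main

    import (
        "fmt"

        "github.com/minio/minio-go/pkg/s3utils"
    )

    func main() {
        // The strict checker rejects the same names the deleted tests
        // covered: leading/trailing dots, successive periods, etc.
        for _, name := range []string{"my-bucket", ".mybucket", "my..bucket", "my"} {
            fmt.Println(name, "=>", s3utils.CheckValidBucketNameStrict(name))
        }
    }
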
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
index 68a459f4a..833f1fe8f 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -17,12 +17,12 @@
package minio
import (
- "fmt"
"hash"
"io"
- "io/ioutil"
"math"
"os"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Verify if reader is *os.File
@@ -43,23 +43,6 @@ func isReadAt(reader io.Reader) (ok bool) {
return
}
-// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool {
- // If part not found should upload the part.
- if uploadReq.Part == nil {
- return true
- }
- // if size mismatches should upload the part.
- if objPart.Size != uploadReq.Part.Size {
- return true
- }
- // if md5sum mismatches should upload the part.
- if objPart.ETag != uploadReq.Part.ETag {
- return true
- }
- return false
-}
-
// optimalPartInfo - calculate the optimal part info for a given
// object size.
//
@@ -93,55 +76,6 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
return totalPartsCount, partSize, lastPartSize, nil
}
-// hashCopyBuffer is identical to hashCopyN except that it doesn't take
-// any size argument but takes a buffer argument and reader should be
-// of io.ReaderAt interface.
-//
-// Stages reads from offsets into the buffer; if the buffer is nil, it is
-// initialized to optimalReadBufferSize.
-func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
- hashWriter := writer
- for _, v := range hashAlgorithms {
- hashWriter = io.MultiWriter(hashWriter, v)
- }
-
- // Buffer is nil, initialize.
- if buf == nil {
- buf = make([]byte, optimalReadBufferSize)
- }
-
- // Offset to start reading from.
- var readAtOffset int64
-
-	// The following block reads data at an offset from the input
-	// reader and copies it into the writer.
- for {
- readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
- if rerr != nil {
- if rerr != io.EOF {
- return 0, rerr
- }
- }
- writeSize, werr := hashWriter.Write(buf[:readAtSize])
- if werr != nil {
- return 0, werr
- }
- if readAtSize != writeSize {
- return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
- }
- readAtOffset += int64(writeSize)
- size += int64(writeSize)
- if rerr == io.EOF {
- break
- }
- }
-
- for k, v := range hashAlgorithms {
- hashSums[k] = v.Sum(nil)
- }
- return size, err
-}
-
// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
hashWriter := writer
@@ -168,10 +102,10 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
// or initiate a new request to fetch a new upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return "", err
}
@@ -182,70 +116,3 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
}
return initMultipartUploadResult.UploadID, nil
}
-
-// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
-// or initiate a new multipart session if no current one is found
-func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) {
- // A map of all uploaded parts.
- var partsInfo map[int]ObjectPart
- var err error
-
- uploadID, err := c.findUploadID(bucketName, objectName)
- if err != nil {
- return "", nil, err
- }
-
- if uploadID == "" {
- // Initiates a new multipart request
- uploadID, err = c.newUploadID(bucketName, objectName, metaData)
- if err != nil {
- return "", nil, err
- }
- } else {
- // Fetch previously upload parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
-			// When the server returns NoSuchUpload even though it previously acknowledged the existence of the upload id,
- // initiate a new multipart upload
- if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
- uploadID, err = c.newUploadID(bucketName, objectName, metaData)
- if err != nil {
- return "", nil, err
- }
- } else {
- return "", nil, err
- }
- }
- }
-
- // Allocate partsInfo if not done yet
- if partsInfo == nil {
- partsInfo = make(map[int]ObjectPart)
- }
-
- return uploadID, partsInfo, nil
-}
-
-// computeHash - Calculates hashes for an input read Seeker.
-func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
- hashWriter := ioutil.Discard
- for _, v := range hashAlgorithms {
- hashWriter = io.MultiWriter(hashWriter, v)
- }
-
- // If no buffer is provided, no need to allocate just use io.Copy.
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return 0, err
- }
-
- // Seek back reader to the beginning location.
- if _, err := reader.Seek(0, 0); err != nil {
- return 0, err
- }
-
- for k, v := range hashAlgorithms {
- hashSums[k] = v.Sum(nil)
- }
- return size, nil
-}
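
The surviving optimalPartInfo helper sizes parts so an upload never exceeds S3's 10000-part limit while rounding each part up to the library's minimum part size. A standalone sketch of that arithmetic follows; the constants are assumptions mirroring this version's constants.go, and this is illustrative, not the vendored code itself:

    package main

    import (
        "fmt"
        "math"
    )

    const (
        maxPartsCount             = 10000                         // S3 part-count cap
        minPartSize               = 1024 * 1024 * 64              // assumed 64MiB floor
        maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // assumed 5TiB ceiling
    )

    func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize, lastPartSize int64) {
        if objectSize == -1 {
            // Unknown stream size: plan for the maximum allowed object.
            objectSize = maxMultipartPutObjectSize
        }
        // Smallest multiple of minPartSize that keeps parts under the cap.
        partSizeFlt := math.Ceil(float64(objectSize) / maxPartsCount)
        partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
        partSize = int64(partSizeFlt)
        totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
        lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
        return totalPartsCount, partSize, lastPartSize
    }

    func main() {
        fmt.Println(optimalPartInfo(5 * 1024 * 1024 * 1024)) // 5GiB => 80 parts of 64MiB
    }
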
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index 56978d427..32fa873d8 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -16,57 +16,7 @@
package minio
-import (
- "net/http"
-
- "github.com/minio/minio-go/pkg/s3utils"
-)
-
-// CopyObject - copy a source object into a new object with the provided name in the provided bucket
-func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectName(objectName); err != nil {
- return err
- }
- if objectSource == "" {
- return ErrInvalidArgument("Object source cannot be empty.")
- }
-
- // customHeaders apply headers.
- customHeaders := make(http.Header)
- for _, cond := range cpCond.conditions {
- customHeaders.Set(cond.key, cond.value)
- }
-
- // Set copy source.
- customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
-
- // Execute PUT on objectName.
- resp, err := c.executeMethod("PUT", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeaders,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
-
- // Decode copy response on success.
- cpObjRes := copyObjectResult{}
- err = xmlDecoder(resp.Body, &cpObjRes)
- if err != nil {
- return err
- }
-
- // Return nil on success.
- return nil
+// CopyObject - copy a source object into a new object
+func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
+ return c.ComposeObject(dst, []SourceInfo{src})
}
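
CopyObject is now a thin wrapper over the new ComposeObject API, so a single-source copy is expressed with SourceInfo/DestinationInfo descriptors. A sketch, assuming the constructor signatures from this version's api-compose-object.go (placeholder names; nil for SSE-C keys and user metadata):

    package main

    import (
        "log"

        minio "github.com/minio/minio-go"
    )

    func main() {
        client, err := minio.New("play.minio.io:9000", "my-access-key", "my-secret-key", true)
        if err != nil {
            log.Fatalln(err)
        }
        // Describe source and destination; nil => no SSE-C decryption
        // key and no replacement user metadata.
        src := minio.NewSourceInfo("my-source-bucket", "my-object", nil)
        dst, err := minio.NewDestinationInfo("my-bucket", "my-object-copy", nil, nil)
        if err != nil {
            log.Fatalln(err)
        }
        if err = client.CopyObject(dst, src); err != nil {
            log.Fatalln(err)
        }
    }
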
diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
new file mode 100644
index 000000000..141b3e91c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
@@ -0,0 +1,46 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
+// PutEncryptedObject - Encrypt and store object.
+func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+
+ if encryptMaterials == nil {
+ return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
+ }
+
+ if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
+ return 0, err
+ }
+
+ if metadata == nil {
+ metadata = make(map[string][]string)
+ }
+
+ // Set the necessary encryption headers, for future decryption.
+ metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
+ metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
+ metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
+
+ return c.putObjectMultipart(bucketName, objectName, encryptMaterials, -1, metadata, progress)
+}
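
Usage of the relocated PutEncryptedObject, sketched with the symmetric-key helpers from pkg/encrypt (key bytes and names are placeholders; the key is assumed to need exactly 32 bytes for AES-256):

    package main

    import (
        "log"
        "os"

        minio "github.com/minio/minio-go"
        "github.com/minio/minio-go/pkg/encrypt"
    )

    func main() {
        client, err := minio.New("play.minio.io:9000", "my-access-key", "my-secret-key", true)
        if err != nil {
            log.Fatalln(err)
        }
        file, err := os.Open("my-file.txt")
        if err != nil {
            log.Fatalln(err)
        }
        defer file.Close()

        // Placeholder 32-byte symmetric key and CBC materials.
        key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustprovided"))
        materials, err := encrypt.NewCBCSecureMaterials(key)
        if err != nil {
            log.Fatalln(err)
        }
        // nil metadata and nil progress reader, as permitted above.
        n, err := client.PutEncryptedObject("my-bucket", "my-object", file, materials, nil, nil)
        if err != nil {
            log.Fatalln(err)
        }
        log.Println("uploaded bytes:", n)
    }
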
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
index 09fec769d..81cdf5c2c 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -17,17 +17,9 @@
package minio
import (
- "crypto/md5"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "hash"
- "io"
- "io/ioutil"
"mime"
"os"
"path/filepath"
- "sort"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -35,10 +27,10 @@ import (
// FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -59,11 +51,6 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// Save the file size.
fileSize := fileStat.Size()
- // Check for largest object size allowed.
- if fileSize > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
- }
-
objMetadata := make(map[string][]string)
// Set contentType based on filepath extension if not given or default
@@ -75,220 +62,5 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
objMetadata["Content-Type"] = []string{contentType}
-
- // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
- // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
- if s3utils.IsGoogleEndpoint(c.endpointURL) {
- if fileSize > int64(maxSinglePutObjectSize) {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
- Key: objectName,
- BucketName: bucketName,
- }
- }
- // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
- }
-
-	// Small object upload is initiated for input data smaller than 5MiB.
- if fileSize < minPartSize && fileSize >= 0 {
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
- }
-
- // Upload all large objects as multipart.
- n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
- if err != nil {
- errResp := ToErrorResponse(err)
- // Verify if multipart functionality is not available, if not
- // fall back to single PutObject operation.
- if errResp.Code == "NotImplemented" {
- // If size of file is greater than '5GiB' fail.
- if fileSize > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
- }
- return n, err
- }
- return n, nil
-}
-
-// putObjectMultipartFromFile - Creates object from contents of *os.File
-//
-// NOTE: This function is meant to be used for readers with local
-// file as in *os.File. This function resumes by skipping all the
-// necessary parts which were already uploaded by verifying them
-// against the MD5SUM of each individual part. This function also
-// effectively utilizes file system capabilities of reading from
-// specific sections and not having to create temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
- uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
- if err != nil {
- return 0, err
- }
-
- // Total data read and written to server. should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var complMultipartUpload completeMultipartUpload
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
- if err != nil {
- return 0, err
- }
-
- // Create a channel to communicate a part was uploaded.
- // Buffer this to 10000, the maximum number of parts allowed by S3.
- uploadedPartsCh := make(chan uploadedPartRes, 10000)
-
- // Create a channel to communicate which part to upload.
- // Buffer this to 10000, the maximum number of parts allowed by S3.
- uploadPartsCh := make(chan uploadPartReq, 10000)
-
- // Just for readability.
- lastPartNumber := totalPartsCount
-
- // Send each part through the partUploadCh to be uploaded.
- for p := 1; p <= totalPartsCount; p++ {
- part, ok := partsInfo[p]
- if ok {
- uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
- } else {
- uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
- }
- }
- close(uploadPartsCh)
-
- // Use three 'workers' to upload parts in parallel.
- for w := 1; w <= totalWorkers; w++ {
- go func() {
- // Deal with each part as it comes through the channel.
- for uploadReq := range uploadPartsCh {
- // Add hash algorithms that need to be calculated by computeHash()
- // In case of a non-v4 signature or https connection, sha256 is not needed.
- hashAlgos := make(map[string]hash.Hash)
- hashSums := make(map[string][]byte)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
-
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(uploadReq.PartNum-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if uploadReq.PartNum == lastPartNumber {
- readOffset = (fileSize - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
- var prtSize int64
- var err error
-
- prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
- if err != nil {
- uploadedPartsCh <- uploadedPartRes{
- Error: err,
- }
- // Exit the goroutine.
- return
- }
-
- // Create the part to be uploaded.
- verifyObjPart := ObjectPart{
- ETag: hex.EncodeToString(hashSums["md5"]),
- PartNumber: uploadReq.PartNum,
- Size: partSize,
- }
-
- // If this is the last part do not give it the full part size.
- if uploadReq.PartNum == lastPartNumber {
- verifyObjPart.Size = lastPartSize
- }
-
- // Verify if part should be uploaded.
- if shouldUploadPart(verifyObjPart, uploadReq) {
- // Proceed to upload the part.
- var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
- if err != nil {
- uploadedPartsCh <- uploadedPartRes{
- Error: err,
- }
- // Exit the goroutine.
- return
- }
- // Save successfully uploaded part metadata.
- uploadReq.Part = &objPart
- }
- // Return through the channel the part size.
- uploadedPartsCh <- uploadedPartRes{
- Size: verifyObjPart.Size,
- PartNum: uploadReq.PartNum,
- Part: uploadReq.Part,
- Error: nil,
- }
- }
- }()
- }
-
- // Retrieve each uploaded part once it is done.
- for u := 1; u <= totalPartsCount; u++ {
- uploadRes := <-uploadedPartsCh
- if uploadRes.Error != nil {
- return totalUploadedSize, uploadRes.Error
- }
- // Retrieve each uploaded part and store it to be completed.
- part := uploadRes.Part
- if part == nil {
- return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
- }
- // Update the total uploaded size.
- totalUploadedSize += uploadRes.Size
- // Update the progress bar if there is one.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
- return totalUploadedSize, err
- }
- }
- // Store the part to be completed.
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
- ETag: part.ETag,
- PartNumber: part.PartNumber,
- })
- }
-
- // Verify if we uploaded all data.
- if totalUploadedSize != fileSize {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
+ return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
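
FPutObject itself is unchanged in shape; it now simply defers size-based routing to putObjectCommon. A minimal call (placeholder paths and names):

    package main

    import (
        "log"

        minio "github.com/minio/minio-go"
    )

    func main() {
        client, err := minio.New("play.minio.io:9000", "my-access-key", "my-secret-key", true)
        if err != nil {
            log.Fatalln(err)
        }
        // An empty contentType would be guessed from the file extension.
        n, err := client.FPutObject("my-bucket", "my-object.zip", "/tmp/my-object.zip", "application/zip")
        if err != nil {
            log.Fatalln(err)
        }
        log.Println("uploaded bytes:", n)
    }
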
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index 3a299f65b..1938378f8 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -18,204 +18,87 @@ package minio
import (
"bytes"
- "crypto/md5"
- "crypto/sha256"
- "encoding/hex"
"encoding/xml"
"fmt"
- "hash"
"io"
"io/ioutil"
"net/http"
"net/url"
- "os"
"sort"
"strconv"
"strings"
-)
-
-// Comprehensive put object operation involving multipart resumable uploads.
-//
-// Following code handles these types of readers.
-//
-// - *os.File
-// - *minio.Object
-// - Any reader which has a method 'ReadAt()'
-//
-// If we exhaust all the known types, code proceeds to use stream as
-// is where each part is re-downloaded, checksummed and verified
-// before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
- if size > 0 && size > minPartSize {
- // Verify if reader is *os.File, then use file system functionalities.
- if isFile(reader) {
- return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
- }
- // Verify if reader is *minio.Object or io.ReaderAt.
- // NOTE: Verification of object is kept for a specific purpose
- // while it is going to be duck typed similar to io.ReaderAt.
- // It is to indicate that *minio.Object implements io.ReaderAt.
- // and such a functionality is used in the subsequent code
- // path.
- if isObject(reader) || isReadAt(reader) {
- return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
- }
- }
- // For any other data size and reader type we do generic multipart
- // approach by staging data in temporary files and uploading them.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
-}
-
-// putObjectMultipartStreamNoChecksum - upload a large object using
-// multipart upload and streaming signature for signing payload.
-// NB: We don't resume an incomplete multipart upload; we overwrite
-// existing parts of an incomplete upload.
-func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
- reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
- uploadID, err := c.findUploadID(bucketName, objectName)
- if err != nil {
- return 0, err
- }
- if uploadID == "" {
- // Initiates a new multipart request
- uploadID, err = c.newUploadID(bucketName, objectName, metadata)
- if err != nil {
- return 0, err
- }
- }
+ "github.com/minio/minio-go/pkg/s3utils"
+)
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
+ metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, size, metadata, progress)
if err != nil {
- return 0, err
- }
-
- // Total data read and written to server. should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Initialize parts uploaded map.
- partsInfo := make(map[int]ObjectPart)
-
- // Part number always starts with '1'.
- var partNumber int
- for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- hookReader := newHook(reader, progress)
-
- // Proceed to upload the part.
- if partNumber == totalPartsCount {
- partSize = lastPartSize
- }
-
- var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID,
- io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize)
- // For unknown size, Read EOF we break away.
- // We do not have to upload till totalPartsCount.
- if err == io.EOF && size < 0 {
- break
- }
-
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
-
- // Save successfully uploaded size.
- totalUploadedSize += partSize
- }
-
- // Verify if we uploaded all the data.
- if size > 0 {
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
- }
-
- // Complete multipart upload.
- var complMultipartUpload completeMultipartUpload
-
- // Loop over total uploaded parts to save them in
- // Parts array before completing the multipart request.
- for i := 1; i < partNumber; i++ {
- part, ok := partsInfo[i]
- if !ok {
- return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ errResp := ToErrorResponse(err)
+ // Verify if multipart functionality is not available, if not
+ // fall back to single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
- ETag: part.ETag,
- PartNumber: part.PartNumber,
- })
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
}
-
- // Return final size.
- return totalUploadedSize, nil
+ return n, err
}
-// putObjectStream uploads files bigger than 64MiB, and also supports
-// the special case where size is unknown, i.e. '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, size int64,
+ metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
- // Total data read and written to server. should be equal to 'size' at the end of the call.
+	// Total data read and written to server. Should be equal to
+ // 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
- uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(size)
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
+ }
+ }()
+
// Part number always starts with '1'.
partNumber := 1
// Initialize a temporary buffer.
tmpBuffer := new(bytes.Buffer)
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
for partNumber <= totalPartsCount {
- // Choose hash algorithms to be calculated by hashCopyN, avoid sha256
- // with non-v4 signature request or HTTPS connection
- hashSums := make(map[string][]byte)
- hashAlgos := make(map[string]hash.Hash)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
+ // Choose hash algorithms to be calculated by hashCopyN,
+ // avoid sha256 with non-v4 signature request or
+ // HTTPS connection.
+ hashAlgos, hashSums := c.hashMaterials()
// Calculates hash sums while copying partSize bytes into tmpBuffer.
prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
@@ -228,33 +111,19 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// as we read from the source.
reader = newHook(tmpBuffer, progress)
- part, ok := partsInfo[partNumber]
-
- // Verify if part should be uploaded.
- if !ok || shouldUploadPart(ObjectPart{
- ETag: hex.EncodeToString(hashSums["md5"]),
- PartNumber: partNumber,
- Size: prtSize,
- }, uploadPartReq{PartNum: partNumber, Part: &part}) {
- // Proceed to upload the part.
- var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
- if err != nil {
- // Reset the temporary buffer upon any error.
- tmpBuffer.Reset()
- return totalUploadedSize, err
- }
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
- } else {
- // Update the progress reader for the skipped part.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
- return totalUploadedSize, err
- }
- }
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
+ hashSums["md5"], hashSums["sha256"], prtSize, metadata)
+ if err != nil {
+ // Reset the temporary buffer upon any error.
+ tmpBuffer.Reset()
+ return totalUploadedSize, err
}
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
// Reset the temporary buffer.
tmpBuffer.Reset()
@@ -293,8 +162,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
+ if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
@@ -303,12 +171,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
@@ -318,14 +186,14 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
// Set ContentType header.
customHeader := make(http.Header)
- for k, v := range metaData {
+ for k, v := range metadata {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// Set a default content-type header if the latter is not provided
- if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
@@ -356,13 +224,16 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
return initiateMultipartUploadResult, nil
}
+const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
+
// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
+ partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectPart{}, err
}
if size > maxPartSize {
@@ -385,10 +256,21 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
// Set upload id.
urlValues.Set("uploadId", uploadID)
+ // Set encryption headers, if any.
+ customHeader := make(http.Header)
+ for k, v := range metadata {
+ if len(v) > 0 {
+ if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
+ customHeader.Set(k, v[0])
+ }
+ }
+ }
+
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
+ customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
@@ -417,12 +299,13 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
-func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
+func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
+ complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
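
uploadPart now forwards only server-side-encryption headers from the supplied metadata into each part request, keyed off the new serverEncryptionKeyPrefix constant. A standalone sketch of that filter (illustrative, not the vendored code):

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    // filterSSEHeaders keeps only "x-amz-server-side-encryption*" keys,
    // mirroring the per-part header filtering shown above.
    func filterSSEHeaders(metadata map[string][]string) http.Header {
        const prefix = "x-amz-server-side-encryption"
        header := make(http.Header)
        for k, v := range metadata {
            if len(v) > 0 && strings.HasPrefix(strings.ToLower(k), prefix) {
                header.Set(k, v[0])
            }
        }
        return header
    }

    func main() {
        meta := map[string][]string{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": {"AES256"},
            "Content-Type": {"application/octet-stream"}, // dropped by the filter
        }
        fmt.Println(filterSSEHeaders(meta))
    }
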
diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go
deleted file mode 100644
index f3844127e..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-progress.go
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "io"
- "strings"
-
- "github.com/minio/minio-go/pkg/encrypt"
- "github.com/minio/minio-go/pkg/s3utils"
-)
-
-// PutObjectWithProgress - with progress.
-func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
- metaData := make(map[string][]string)
- metaData["Content-Type"] = []string{contentType}
- return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
-}
-
-// PutEncryptedObject - Encrypt and store object.
-func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) {
-
- if encryptMaterials == nil {
- return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
- }
-
- if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
- return 0, err
- }
-
- if metaData == nil {
- metaData = make(map[string][]string)
- }
-
- // Set the necessary encryption headers, for future decryption.
- metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()}
- metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()}
- metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
-
- return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress)
-}
-
-// PutObjectWithMetadata - with metadata.
-func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if reader == nil {
- return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
- }
-
- // Size of the object.
- var size int64
-
- // Get reader size.
- size, err = getReaderSize(reader)
- if err != nil {
- return 0, err
- }
-
- // Check for largest object size allowed.
- if size > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
- }
-
- // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
- // So we fall back to single PUT operation with the maximum limit of 5GiB.
- if s3utils.IsGoogleEndpoint(c.endpointURL) {
- if size <= -1 {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
- Key: objectName,
- BucketName: bucketName,
- }
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
- }
-
- // putSmall object.
- if size < minPartSize && size >= 0 {
- return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
- }
- // For all sizes greater than 5MiB do multipart.
- n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
- if err != nil {
- errResp := ToErrorResponse(err)
- // Verify if multipart functionality is not available, if not
- // fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
- // Verify if size of reader is greater than '5GiB'.
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
- }
- return n, err
- }
- return n, nil
-}
-
-// PutObjectStreaming using AWS streaming signature V4
-func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
- return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil)
-}
-
-// PutObjectStreamingWithMetadata using AWS streaming signature V4
-func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) {
- return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil)
-}
-
-// PutObjectStreamingWithProgress using AWS streaming signature V4
-func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- // NOTE: Streaming signature is not supported by GCS.
- if s3utils.IsGoogleEndpoint(c.endpointURL) {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "AWS streaming signature v4 is not supported with Google Cloud Storage",
- Key: objectName,
- BucketName: bucketName,
- }
- }
- // This method should return error with signature v2 minioClient.
- if c.signature.isV2() {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2",
- Key: objectName,
- BucketName: bucketName,
- }
- }
-
- // Size of the object.
- var size int64
-
- // Get reader size.
- size, err = getReaderSize(reader)
- if err != nil {
- return 0, err
- }
-
- // Check for largest object size allowed.
- if size > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
- }
-
- // If size cannot be found on a stream, it is not possible
- // to upload using streaming signature, fall back to multipart.
- if size < 0 {
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
- }
-
- // Set signature type to streaming signature v4.
- c.signature = SignatureV4Streaming
-
- if size < minPartSize && size >= 0 {
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
- }
-
- // For all sizes greater than 64MiB do multipart.
- n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
- if err != nil {
- errResp := ToErrorResponse(err)
- // Verify if multipart functionality is not available, if not
- // fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
- // Verify if size of reader is greater than '5GiB'.
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
- }
- return n, err
- }
-
- return n, nil
-}
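
The streaming entry points deleted here survive in the new api-put-object-streaming.go (see the diffstat). A call via the simple wrapper, sketched with placeholder names:

    package main

    import (
        "log"
        "os"

        minio "github.com/minio/minio-go"
    )

    func main() {
        client, err := minio.New("play.minio.io:9000", "my-access-key", "my-secret-key", true)
        if err != nil {
            log.Fatalln(err)
        }
        file, err := os.Open("my-file.txt")
        if err != nil {
            log.Fatalln(err)
        }
        defer file.Close()

        // Streaming signature v4; per the removed checks, not supported
        // against Google Cloud Storage or with a v2-signature client.
        n, err := client.PutObjectStreaming("my-bucket", "my-object", file)
        if err != nil {
            log.Fatalln(err)
        }
        log.Println("uploaded bytes:", n)
    }
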
diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go
deleted file mode 100644
index ebf422638..000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-readat.go
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "crypto/md5"
- "crypto/sha256"
- "fmt"
- "hash"
- "io"
- "io/ioutil"
- "sort"
-)
-
-// uploadedPartRes - the response received from a part upload.
-type uploadedPartRes struct {
- Error error // Any error encountered while uploading the part.
- PartNum int // Number of the part uploaded.
- Size int64 // Size of the part uploaded.
- Part *ObjectPart
-}
-
-type uploadPartReq struct {
- PartNum int // Number of the part uploaded.
- Part *ObjectPart // Size of the part uploaded.
-}
-
-// shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool {
- // If part not found part should be uploaded.
- if uploadReq.Part == nil {
- return true
- }
- // if size mismatches part should be uploaded.
- if uploadReq.Part.Size != objPart.Size {
- return true
- }
- return false
-}
-
-// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader
-// of type which implements io.ReaderAt interface (ReadAt method).
-//
-// NOTE: This function is meant to be used for all readers which
-// implement io.ReaderAt, which allows us to resume multipart
-// uploads by reading at an offset, avoiding a re-read of the
-// data which was already uploaded. Internally this function uses
-// temporary files for staging all the data; these temporary files are
-// cleaned automatically when the caller, i.e. the http client, closes the
-// stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
- uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
- if err != nil {
- return 0, err
- }
-
-	// Total data read and written to server. Should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var complMultipartUpload completeMultipartUpload
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
- if err != nil {
- return 0, err
- }
-
- // Used for readability, lastPartNumber is always totalPartsCount.
- lastPartNumber := totalPartsCount
-
- // Declare a channel that sends the next part number to be uploaded.
-	// Buffered to 10000 because that's the maximum number of parts allowed
- // by S3.
- uploadPartsCh := make(chan uploadPartReq, 10000)
-
- // Declare a channel that sends back the response of a part upload.
-	// Buffered to 10000 because that's the maximum number of parts allowed
- // by S3.
- uploadedPartsCh := make(chan uploadedPartRes, 10000)
-
- // Send each part number to the channel to be processed.
- for p := 1; p <= totalPartsCount; p++ {
- part, ok := partsInfo[p]
- if ok {
- uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
- } else {
- uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
- }
- }
- close(uploadPartsCh)
-
- // Receive each part number from the channel allowing three parallel uploads.
- for w := 1; w <= totalWorkers; w++ {
- go func() {
- // Read defaults to reading at 5MiB buffer.
- readAtBuffer := make([]byte, optimalReadBufferSize)
-
- // Each worker will draw from the part channel and upload in parallel.
- for uploadReq := range uploadPartsCh {
- // Declare a new tmpBuffer.
- tmpBuffer := new(bytes.Buffer)
-
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(uploadReq.PartNum-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if uploadReq.PartNum == lastPartNumber {
- readOffset = (size - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
-
- // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
- // Sha256 is avoided in non-v4 signature requests or HTTPS connections
- hashSums := make(map[string][]byte)
- hashAlgos := make(map[string]hash.Hash)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
-
- var prtSize int64
- var err error
- prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
- if err != nil {
- // Send the error back through the channel.
- uploadedPartsCh <- uploadedPartRes{
- Size: 0,
- Error: err,
- }
- // Exit the goroutine.
- return
- }
-
-			// Verify if the part is already uploaded.
- verifyObjPart := ObjectPart{
- PartNumber: uploadReq.PartNum,
- Size: partSize,
- }
- // Special case if we see a last part number, save last part
- // size as the proper part size.
- if uploadReq.PartNum == lastPartNumber {
- verifyObjPart.Size = lastPartSize
- }
-
- // Only upload the necessary parts. Otherwise return size through channel
- // to update any progress bar.
- if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
- // Proceed to upload the part.
- var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
- if err != nil {
- uploadedPartsCh <- uploadedPartRes{
- Size: 0,
- Error: err,
- }
- // Exit the goroutine.
- return
- }
- // Save successfully uploaded part metadata.
- uploadReq.Part = &objPart
- }
- // Send successful part info through the channel.
- uploadedPartsCh <- uploadedPartRes{
- Size: verifyObjPart.Size,
- PartNum: uploadReq.PartNum,
- Part: uploadReq.Part,
- Error: nil,
- }
- }
- }()
- }
-
- // Gather the responses as they occur and update any
- // progress bar.
- for u := 1; u <= totalPartsCount; u++ {
- uploadRes := <-uploadedPartsCh
- if uploadRes.Error != nil {
- return totalUploadedSize, uploadRes.Error
- }
- // Retrieve each uploaded part and store it to be completed.
- // part, ok := partsInfo[uploadRes.PartNum]
- part := uploadRes.Part
- if part == nil {
- return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
- }
- // Update the totalUploadedSize.
- totalUploadedSize += uploadRes.Size
- // Update the progress bar if there is one.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
- return totalUploadedSize, err
- }
- }
- // Store the parts to be completed in order.
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
- ETag: part.ETag,
- PartNumber: part.PartNumber,
- })
- }
-
- // Verify if we uploaded all the data.
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
new file mode 100644
index 000000000..0d4639e83
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
@@ -0,0 +1,436 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// PutObjectStreaming using AWS streaming signature V4
+func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
+ return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
+}
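
For orientation, here is a minimal sketch of how the new PutObjectStreaming API is called from application code; the endpoint, credentials, bucket and file names below are placeholders, not part of this change:

package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	file, err := os.Open("backup.tar.gz")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	// The size is discovered internally; inputs of 64MiB and above are
	// uploaded in parts signed with the streaming signature.
	n, err := client.PutObjectStreaming("my-bucket", "backup.tar.gz", file)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
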
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// The following code handles these types of readers.
+//
+// - *os.File
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+//
+func (c Client) putObjectMultipartStream(bucketName, objectName string,
+ reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+
+	// Verify if reader is *minio.Object, *os.File or io.ReaderAt.
+	// NOTE: The explicit check for *minio.Object is kept on purpose,
+	// even though it would otherwise be duck-typed as an io.ReaderAt:
+	// knowing that the reader is a *minio.Object lets the code below
+	// choose the appropriate upload path.
+ if isFile(reader) || !isObject(reader) && isReadAt(reader) {
+ n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
+ } else {
+ n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ }
+ if err != nil {
+ errResp := ToErrorResponse(err)
+		// If multipart functionality is not available, fall back to a
+		// single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ }
+ }
+ return n, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+ Part *ObjectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part uploaded.
+	Part    *ObjectPart // Part info, filled in once the part is uploaded.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant for all readers which implement
+// io.ReaderAt, which allows reading at an offset and thus avoids
+// re-reading data that was already uploaded. Internally this function
+// uses temporary files for staging all the data; these temporary files
+// are cleaned up automatically when the caller, i.e. the HTTP client,
+// closes the stream after uploading all the contents successfully.
+func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
+ reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ if err != nil {
+ return 0, err
+ }
+
+	// Abort the multipart upload in progress if the
+	// function returns any error. Since we do not resume,
+	// we should purge the parts which have been uploaded
+	// to relinquish storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
+ }
+ }()
+
+	// Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Declare a channel that sends the next part number to be uploaded.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadPartsCh := make(chan uploadPartReq, 10000)
+
+ // Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+ // Used for readability, lastPartNumber is always totalPartsCount.
+ lastPartNumber := totalPartsCount
+
+ // Send each part number to the channel to be processed.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
+ close(uploadPartsCh)
+
+ // Receive each part number from the channel allowing three parallel uploads.
+ for w := 1; w <= totalWorkers; w++ {
+ go func() {
+ // Each worker will draw from the part channel and upload in parallel.
+ for uploadReq := range uploadPartsCh {
+
+				// Compute the read offset for this part; offsets are
+				// multiples of partSize. Use goroutine-local copies so
+				// the workers do not race on the shared partSize
+				// variable or the outer named return err.
+				readOffset := int64(uploadReq.PartNum-1) * partSize
+				curPartSize := partSize
+
+				// As a special case if partNumber is lastPartNumber, we
+				// calculate the offset based on the last part size.
+				if uploadReq.PartNum == lastPartNumber {
+					readOffset = (size - lastPartSize)
+					curPartSize = lastPartSize
+				}
+
+				// Get a section reader on a particular offset.
+				sectionReader := newHook(io.NewSectionReader(reader, readOffset, curPartSize), progress)
+
+				// Proceed to upload the part.
+				objPart, err := c.uploadPart(bucketName, objectName, uploadID,
+					sectionReader, uploadReq.PartNum,
+					nil, nil, curPartSize, metadata)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ uploadReq.Part = &objPart
+
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: objPart.Size,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
+ Error: nil,
+ }
+ }
+ }()
+ }
+
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
+ }
+ // Retrieve each uploaded part and store it to be completed.
+ part := uploadRes.Part
+ if part == nil {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+ }
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ // Store the parts to be completed in order.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Verify if we uploaded all the data.
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
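
The function above is built around a common Go fan-out pattern: part numbers are queued on one buffered channel, a fixed pool of workers drains it, and results are gathered on a second channel. A stripped-down, runnable sketch of just that pattern, with the upload replaced by an echo:

package main

import "fmt"

func main() {
	const totalParts = 8
	const workers = 3 // mirrors totalWorkers in the library

	jobs := make(chan int, totalParts)
	results := make(chan int, totalParts)

	// Producer: queue every part number, then close the channel so the
	// workers' range loops terminate.
	for p := 1; p <= totalParts; p++ {
		jobs <- p
	}
	close(jobs)

	// Workers: each goroutine drains the shared jobs channel.
	for w := 0; w < workers; w++ {
		go func() {
			for part := range jobs {
				results <- part // stand-in for uploadPart()
			}
		}()
	}

	// Collector: one receive per queued job, so no WaitGroup is needed.
	for i := 0; i < totalParts; i++ {
		fmt.Println("completed part", <-results)
	}
}
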
+
+func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
+ reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+	// Initiate a new multipart upload request.
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ if err != nil {
+ return 0, err
+ }
+
+	// Abort the multipart upload if the function returns
+	// any error. Since we do not resume, we should purge
+	// the parts which have been uploaded to relinquish
+	// storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
+ }
+ }()
+
+	// Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ hookReader := newHook(reader, progress)
+
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ io.LimitReader(hookReader, partSize),
+ partNumber, nil, nil, partSize, metadata)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partSize
+ }
+
+ // Verify if we uploaded all the data.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
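
Both multipart paths above rely on optimalPartInfo splitting the object into equal-sized parts with the remainder carried by the last part. A hypothetical helper showing that arithmetic (illustrative names only; the library's optimalPartInfo additionally enforces S3's part-count and part-size limits):

package main

import "fmt"

// partLayout returns how many parts a payload of the given size needs
// at a fixed part size, and how large the final part is.
func partLayout(size, partSize int64) (totalParts, lastPartSize int64) {
	totalParts = size / partSize
	lastPartSize = partSize
	if rem := size % partSize; rem != 0 {
		totalParts++
		lastPartSize = rem
	}
	return totalParts, lastPartSize
}

func main() {
	n, last := partLayout(650<<20, 64<<20) // 650MiB in 64MiB parts
	fmt.Println(n, last)                   // 11 parts, last part is 10MiB
}
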
+
+// putObjectNoChecksum - special function used for Google Cloud Storage,
+// since Google's multipart API is not S3 compatible.
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+	// Size -1 is only supported on Google Cloud Storage; we error
+	// out in all other situations.
+ if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
+ return 0, ErrEntityTooSmall(size, bucketName, objectName)
+ }
+ if size > 0 {
+ if isReadAt(reader) && !isObject(reader) {
+ reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
+ }
+ }
+
+ // Update progress reader appropriately to the latest offset as we
+ // read from the source.
+ readSeeker := newHook(reader, progress)
+
+ // This function does not calculate sha256 and md5sum for payload.
+ // Execute put object.
+ st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
+ if err != nil {
+ return 0, err
+ }
+ if st.Size != size {
+ return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+ }
+ return size, nil
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Set headers.
+ customHeader := make(http.Header)
+
+ // Set metadata to headers
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+	// If Content-Type is not provided, default it to application/octet-stream.
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Bytes: md5Sum,
+ contentSHA256Bytes: sha256Sum,
+ }
+
+	// Execute PUT on objectName.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ var objInfo ObjectInfo
+	// Trim the surrounding double quotes from the ETag.
+ objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
+ // A success here means data was written to server successfully.
+ objInfo.Size = size
+
+ // Return here.
+ return objInfo, nil
+}
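
A minimal usage sketch for the metadata path through putObjectDo; the endpoint, credentials, bucket and object names are placeholders. Metadata keys become request headers, and Content-Type defaults to application/octet-stream when omitted, as shown above:

package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Each metadata key/value pair is set as a request header.
	n, err := client.PutObjectWithMetadata("my-bucket", "hello.txt",
		strings.NewReader("hello world"),
		map[string][]string{"Content-Type": {"text/plain"}}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
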
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index e218075df..2ea498789 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -17,17 +17,14 @@
package minio
import (
- "bytes"
- "crypto/md5"
- "crypto/sha256"
- "hash"
"io"
- "io/ioutil"
- "net/http"
"os"
"reflect"
"runtime"
"strings"
+
+ "github.com/minio/minio-go/pkg/credentials"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// toInt - converts go value to its integer representation based
@@ -109,14 +106,24 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
case "|0", "|1":
return
}
- size = st.Size()
+ var pos int64
+ pos, err = v.Seek(0, 1) // SeekCurrent.
+ if err != nil {
+ return -1, err
+ }
+ size = st.Size() - pos
case *Object:
var st ObjectInfo
st, err = v.Stat()
if err != nil {
return
}
- size = st.Size
+ var pos int64
+ pos, err = v.Seek(0, 1) // SeekCurrent.
+ if err != nil {
+ return -1, err
+ }
+ size = st.Size - pos
}
}
// Returns the size here.
@@ -135,184 +142,77 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
-// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
-// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
-// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
-// Maximum object size that can be uploaded through this operation will be 5TiB.
-//
-// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
-// So we fall back to single PUT operation with the maximum limit of 5GiB.
-//
+// - For size smaller than 64MiB PutObject automatically does a
+// single atomic Put operation.
+// - For size larger than 64MiB PutObject automatically does a
+// multipart Put operation.
+// - For size input as -1 PutObject does a multipart Put operation
+// until input stream reaches EOF. Maximum object size that can
+// be uploaded through this operation will be 5TiB.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
+ return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
+ "Content-Type": []string{contentType},
+ }, nil)
}
-// putObjectNoChecksum - special function used for Google Cloud Storage,
-// since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
-
- // Update progress reader appropriately to the latest offset as we
- // read from the source.
- readSeeker := newHook(reader, progress)
-
- // This function does not calculate sha256 and md5sum for payload.
- // Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
- if err != nil {
- return 0, err
- }
- if st.Size != size {
- return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
- }
- return size, nil
+// PutObjectWithSize - is a helper similar in behavior to PutObject(),
+// but it takes the size argument explicitly. This avoids doing reflection
+// internally to figure out the size of the input stream. If the input
+// size is less than 0, this function returns an error.
+func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
}
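
A short sketch of when PutObjectWithSize is useful: the source's length is already known up front (here, hypothetically, from an HTTP response header), so the reflection in getReaderSize can be skipped entirely:

package main

import (
	"log"
	"net/http"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Hypothetical source URL; ContentLength may be -1 if the server
	// did not send it, in which case PutObjectWithSize returns an error.
	resp, err := http.Get("https://example.com/large.bin")
	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close()
	n, err := client.PutObjectWithSize("my-bucket", "large.bin",
		resp.Body, resp.ContentLength, nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
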
-// putObjectSingle is a special function for uploading single put object request.
-// This special function is used as a fallback when multipart upload fails.
-func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // If size is a stream, upload up to 5GiB.
- if size <= -1 {
- size = maxSinglePutObjectSize
- }
+// PutObjectWithMetadata using AWS streaming signature V4
+func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
+}
- // Add the appropriate hash algorithms that need to be calculated by hashCopyN
- // In case of non-v4 signature request or HTTPS connection, sha256 is not needed.
- hashAlgos := make(map[string]hash.Hash)
- hashSums := make(map[string][]byte)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
+// PutObjectWithProgress using AWS streaming signature V4
+func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Size of the object.
+ var size int64
- if size <= minPartSize {
- // Initialize a new temporary buffer.
- tmpBuffer := new(bytes.Buffer)
- size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size)
- reader = bytes.NewReader(tmpBuffer.Bytes())
- tmpBuffer.Reset()
- } else {
- // Initialize a new temporary file.
- var tmpFile *tempFile
- tmpFile, err = newTempFile("single$-putobject-single")
- if err != nil {
- return 0, err
- }
- defer tmpFile.Close()
- size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
- if err != nil {
- return 0, err
- }
- // Seek back to beginning of the temporary file.
- if _, err = tmpFile.Seek(0, 0); err != nil {
- return 0, err
- }
- reader = tmpFile
- }
- // Return error if its not io.EOF.
- if err != nil && err != io.EOF {
- return 0, err
- }
- // Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
+ // Get reader size.
+ size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
- if st.Size != size {
- return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
- }
- // Progress the reader to the size if putObjectDo is successful.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
- return size, err
- }
- }
- return size, nil
+ return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
}
-// putObjectDo - executes the put object http operation.
-// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return ObjectInfo{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return ObjectInfo{}, err
- }
-
- if size <= -1 {
- return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
+func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Check for largest object size allowed.
+ if size > int64(maxMultipartPutObjectSize) {
+ return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
- if size > maxSinglePutObjectSize {
- return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ // NOTE: Streaming signature is not supported by GCS.
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ // Do not compute MD5 for Google Cloud Storage.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
- // Set headers.
- customHeader := make(http.Header)
-
- // Set metadata to headers
- for k, v := range metaData {
- if len(v) > 0 {
- customHeader.Set(k, v[0])
+ if c.overrideSignerType.IsV2() {
+ if size > 0 && size < minPartSize {
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
+ return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
}
- // If Content-Type is not provided, set the default application/octet-stream one
- if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
- customHeader.Set("Content-Type", "application/octet-stream")
+	// If size cannot be found on a stream, it is not possible
+	// to upload using streaming signature; fall back to multipart.
+ if size < 0 {
+ return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
}
- // Populate request metadata.
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
- }
+ // Set streaming signature.
+ c.overrideSignerType = credentials.SignatureV4Streaming
- // Execute PUT an objectName.
- resp, err := c.executeMethod("PUT", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return ObjectInfo{}, err
+ if size < minPartSize {
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
-
- var objInfo ObjectInfo
- // Trim off the odd double quotes from ETag in the beginning and end.
- objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
- // A success here means data was written to server successfully.
- objInfo.Size = size
- // Return here.
- return objInfo, nil
+ // For all sizes greater than 64MiB do multipart.
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index 73790f002..3574cbc1a 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -22,6 +22,8 @@ import (
"io"
"net/http"
"net/url"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// RemoveBucket deletes the bucket name.
@@ -30,7 +32,7 @@ import (
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Execute DELETE on bucket.
@@ -57,10 +59,10 @@ func (c Client) RemoveBucket(bucketName string) error {
// RemoveObject remove an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Execute DELETE on objectName.
@@ -132,7 +134,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
errorCh := make(chan RemoveObjectError, 1)
// Validate if bucket name is valid.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(errorCh)
errorCh <- RemoveObjectError{
Err: err,
@@ -174,7 +176,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
}
}
if count == 0 {
- // Multi Objects Delete API doesn't accept empty object list, quit immediatly
+ // Multi Objects Delete API doesn't accept empty object list, quit immediately
break
}
if count < maxEntries {
@@ -212,10 +214,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
// RemoveIncompleteUpload aborts a partially uploaded object.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Find multipart upload id of the object to be aborted.
@@ -237,10 +239,10 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// uploadID, all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
index ec63d6b94..4b297407b 100644
--- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -36,8 +36,8 @@ type owner struct {
ID string
}
-// commonPrefix container for prefix response.
-type commonPrefix struct {
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
Prefix string
}
@@ -45,7 +45,7 @@ type commonPrefix struct {
type ListBucketV2Result struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
- CommonPrefixes []commonPrefix
+ CommonPrefixes []CommonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
@@ -74,7 +74,7 @@ type ListBucketV2Result struct {
type ListBucketResult struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
- CommonPrefixes []commonPrefix
+ CommonPrefixes []CommonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
@@ -116,7 +116,7 @@ type ListMultipartUploadsResult struct {
Prefix string
Delimiter string
// A response can contain CommonPrefixes only if you specify a delimiter.
- CommonPrefixes []commonPrefix
+ CommonPrefixes []CommonPrefix
}
// initiator container for who initiated multipart upload.
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index 5b3dfe1b4..4b530327b 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -28,7 +28,7 @@ import (
// BucketExists verify if bucket exists and you have permission to access it.
func (c Client) BucketExists(bucketName string) (bool, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return false, err
}
@@ -55,11 +55,13 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
// List of header keys to be filtered, usually
// from all S3 API http responses.
var defaultFilterKeys = []string{
+ "Connection",
"Transfer-Encoding",
"Accept-Ranges",
"Date",
"Server",
"Vary",
+ "x-amz-bucket-region",
"x-amz-request-id",
"x-amz-id-2",
// Add new headers to be ignored.
@@ -80,10 +82,10 @@ func extractObjMetadata(header http.Header) http.Header {
// StatObject verifies if object exists and you have permission to access.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
reqHeaders := NewHeadReqHeaders()
@@ -93,10 +95,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Lower level API for statObject supporting pre-conditions and range headers.
func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
@@ -126,12 +128,13 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
- // Content-Length is not valid for Google Cloud Storage, do not verify.
+	// Parse content length if it exists.
var size int64 = -1
- if !s3utils.IsGoogleEndpoint(c.endpointURL) {
- // Parse content length.
- size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ contentLengthStr := resp.Header.Get("Content-Length")
+ if contentLengthStr != "" {
+ size, err = strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
+ // Content-Length is not valid
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue,
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index a563a18d4..6fe508aa1 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,23 +19,27 @@ package minio
import (
"bytes"
+ "crypto/md5"
+ "crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
+ "hash"
"io"
"io/ioutil"
"math/rand"
+ "net"
"net/http"
"net/http/httputil"
"net/url"
"os"
- "regexp"
"runtime"
"strings"
"sync"
"time"
+ "github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -46,14 +51,11 @@ type Client struct {
// Parsed endpoint url provided by the user.
endpointURL url.URL
- // AccessKeyID required for authorized requests.
- accessKeyID string
- // SecretAccessKey required for authorized requests.
- secretAccessKey string
- // Choose a signature type if necessary.
- signature SignatureType
- // Set to 'true' if Client has no access and secret keys.
- anonymous bool
+ // Holds various credential providers.
+ credsProvider *credentials.Credentials
+
+ // Custom signerType value overrides all credentials.
+ overrideSignerType credentials.SignatureType
// User supplied.
appInfo struct {
@@ -85,7 +87,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "2.1.0"
+ libraryVersion = "3.0.0"
)
// User Agent should always follow the below style.
@@ -100,58 +102,58 @@ const (
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+ creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
-
- // Set to use signature version '2'.
- clnt.signature = SignatureV2
+ clnt.overrideSignerType = credentials.SignatureV2
return clnt, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
-
- // Set to use signature version '4'.
- clnt.signature = SignatureV4
+ clnt.overrideSignerType = credentials.SignatureV4
return clnt, nil
}
-// New - instantiate minio client Client, adds automatic verification of signature.
+// New - instantiate minio client, adds automatic verification of signature.
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
- return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "")
-}
-
-// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
-// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
-// Use this function if your application deals with a single region.
-func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
-
// Google cloud storage should be set to signature V2, force it if not.
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
- clnt.signature = SignatureV2
+ clnt.overrideSignerType = credentials.SignatureV2
}
-
- // If Amazon S3 set to signature v2.n
+ // If Amazon S3 set to signature v4.
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
- clnt.signature = SignatureV4
+ clnt.overrideSignerType = credentials.SignatureV4
}
+ return clnt, nil
+}
- // Sets custom region, if region is empty bucket location cache is used automatically.
- clnt.region = region
+// NewWithCredentials - instantiate minio client with credentials provider
+// for retrieving credentials from various credentials provider such as
+// IAM, File, Env etc.
+func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
+ return privateNew(endpoint, creds, secure, region)
+}
- // Success..
- return clnt, nil
+// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
+// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
+// Use this function if your application deals with a single region.
+func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ return privateNew(endpoint, creds, secure, region)
}
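
A minimal sketch of the new credentials-provider construction path; the endpoint and keys are placeholders. Static V4 credentials are shown, but any provider returning a *credentials.Credentials (IAM, environment, file based) plugs in the same way:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Static signature-v4 credentials; the empty string is the
	// optional session token.
	creds := credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", "")
	client, err := minio.NewWithCredentials("play.minio.io:9000", creds, true, "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}
	_ = client // ready for bucket and object operations
}
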
// lockedRandSource provides protected rand source, implements rand.Source interface.
@@ -188,7 +190,7 @@ func redirectHeaders(req *http.Request, via []*http.Request) error {
return nil
}
-func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
if err != nil {
@@ -197,8 +199,9 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
// instantiate new Client.
clnt := new(Client)
- clnt.accessKeyID = accessKeyID
- clnt.secretAccessKey = secretAccessKey
+
+ // Save the credentials.
+ clnt.credsProvider = creds
// Remember whether we are using https or not
clnt.secure = secure
@@ -212,7 +215,10 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
CheckRedirect: redirectHeaders,
}
- // Instantiae bucket location cache.
+ // Sets custom region, if region is empty bucket location cache is used automatically.
+ clnt.region = region
+
+ // Instantiate bucket location cache.
clnt.bucketLocCache = newBucketLocationCache()
// Introduce a new locked random seed.
@@ -286,6 +292,29 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
}
}
+// Hash materials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For a signature v4 request, if the connection is insecure, compute only sha256.
+// - For a signature v4 request, if the connection is secure, compute only md5.
+// - For an anonymous request, compute md5.
+func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
+ hashSums = make(map[string][]byte)
+ hashAlgos = make(map[string]hash.Hash)
+ if c.overrideSignerType.IsV4() {
+ if c.secure {
+ hashAlgos["md5"] = md5.New()
+ } else {
+ hashAlgos["sha256"] = sha256.New()
+ }
+ } else {
+ if c.overrideSignerType.IsAnonymous() {
+ hashAlgos["md5"] = md5.New()
+ }
+ }
+ return hashAlgos, hashSums
+}
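
The hashAlgos/hashSums maps returned here are meant to be filled in a single pass over the payload. A self-contained sketch of that pattern using io.MultiWriter (assumed setup: both md5 and sha256 registered, a string payload):

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

func main() {
	hashAlgos := map[string]hash.Hash{"md5": md5.New(), "sha256": sha256.New()}
	hashSums := make(map[string][]byte)

	writers := make([]io.Writer, 0, len(hashAlgos))
	for _, h := range hashAlgos {
		writers = append(writers, h)
	}

	// One pass over the payload feeds every registered hash at once.
	if _, err := io.Copy(io.MultiWriter(writers...), strings.NewReader("payload")); err != nil {
		panic(err)
	}

	for name, h := range hashAlgos {
		hashSums[name] = h.Sum(nil)
		fmt.Println(name, hex.EncodeToString(hashSums[name]))
	}
}
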
+
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
// If set newRequest presigns the URL.
@@ -306,40 +335,6 @@ type requestMetadata struct {
contentMD5Bytes []byte
}
-// regCred matches credential string in HTTP header
-var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
-
-// regSign matches signature string in HTTP header
-var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
-
-// Filter out signature value from Authorization header.
-func (c Client) filterSignature(req *http.Request) {
- if _, ok := req.Header["Authorization"]; !ok {
- return
- }
- // Handle if Signature V2.
- if c.signature.isV2() {
- // Set a temporary redacted auth
- req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
- return
- }
-
- /// Signature V4 authorization header.
-
- // Save the original auth.
- origAuth := req.Header.Get("Authorization")
- // Strip out accessKeyID from:
- // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
- newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
-
- // Strip out 256-bit signature from: Signature=<256-bit signature>
- newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
-
- // Set a temporary redacted auth
- req.Header.Set("Authorization", newAuth)
- return
-}
-
// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
@@ -349,7 +344,10 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
}
// Filter out Signature field from Authorization header.
- c.filterSignature(req)
+ origAuth := req.Header.Get("Authorization")
+ if origAuth != "" {
+ req.Header.Set("Authorization", redactSignature(origAuth))
+ }
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
@@ -478,6 +476,13 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
case os.Stdin, os.Stdout, os.Stderr:
isRetryable = false
}
+ // Figure out if the body can be closed - if yes
+ // we will definitely close it upon the function
+ // return.
+ bodyCloser, ok := metadata.contentBody.(io.Closer)
+ if ok {
+ defer bodyCloser.Close()
+ }
}
// Create a done channel to control 'newRetryTimer' go routine.
@@ -553,9 +558,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Bucket region if set in error response and the error
// code dictates invalid region, we can retry the request
// with the new region.
- if errResponse.Code == "InvalidRegion" && errResponse.Region != "" {
- c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
- continue // Retry.
+ //
+			// Additionally we should only retry if bucketLocation and custom
+			// region are empty.
+ if metadata.bucketLocation == "" && c.region == "" {
+ if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
}
// Verify if error response code is retryable.
@@ -581,53 +591,72 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
method = "POST"
}
- // Default all requests to "us-east-1" or "cn-north-1" (china region)
- location := "us-east-1"
- if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
- // For china specifically we need to set everything to
- // cn-north-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // China region.
- location = "cn-north-1"
- }
-
- // Gather location only if bucketName is present.
- if metadata.bucketName != "" {
- location, err = c.getBucketLocation(metadata.bucketName)
- if err != nil {
- return nil, err
+ location := metadata.bucketLocation
+ if location == "" {
+ if metadata.bucketName != "" {
+ // Gather location only if bucketName is present.
+ location, err = c.getBucketLocation(metadata.bucketName)
+ if err != nil {
+ if ToErrorResponse(err).Code != "AccessDenied" {
+ return nil, err
+ }
+ }
+ // Upon AccessDenied error on fetching bucket location, default
+ // to possible locations based on endpoint URL. This can usually
+ // happen when GetBucketLocation() is disabled using IAM policies.
+ }
+ if location == "" {
+ location = getDefaultLocation(c.endpointURL, c.region)
}
}
- // Save location.
- metadata.bucketLocation = location
-
// Construct a new target URL.
- targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
if err != nil {
return nil, err
}
// Initialize a new HTTP request for the method.
- req, err = http.NewRequest(method, targetURL.String(), metadata.contentBody)
+ req, err = http.NewRequest(method, targetURL.String(), nil)
if err != nil {
return nil, err
}
- // Anonymous request.
- anonymous := c.accessKeyID == "" || c.secretAccessKey == ""
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
// Generate presign url if needed, return right here.
if metadata.expires != 0 && metadata.presignURL {
- if anonymous {
- return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
+ if signerType.IsAnonymous() {
+ return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
}
- if c.signature.isV2() {
+ if signerType.IsV2() {
// Presign URL with signature v2.
- req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
- } else if c.signature.isV4() {
+ req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires)
+ } else if signerType.IsV4() {
// Presign URL with signature v4.
- req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+ req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
}
return req, nil
}
@@ -640,9 +669,21 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set(k, v[0])
}
- // set incoming content-length.
- if metadata.contentLength > 0 {
- req.ContentLength = metadata.contentLength
+ // Go net/http notoriously closes the request body.
+ // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
+	// This can cause underlying *os.File seekers to fail; avoid that
+ // by making sure to wrap the closer as a nop.
+ if metadata.contentLength == 0 {
+ req.Body = nil
+ } else {
+ req.Body = ioutil.NopCloser(metadata.contentBody)
+ }
+
+ // Set incoming content-length.
+ req.ContentLength = metadata.contentLength
+ if req.ContentLength <= -1 {
+ // For unknown content length, we upload using transfer-encoding: chunked.
+ req.TransferEncoding = []string{"chunked"}
}
// set md5Sum for content protection.
@@ -650,17 +691,18 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
- if anonymous {
+ // For anonymous requests just return.
+ if signerType.IsAnonymous() {
return req, nil
- } // Sign the request for all authenticated requests.
+ }
switch {
- case c.signature.isV2():
+ case signerType.IsV2():
// Add signature version '2' authorization header.
- req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
- case c.signature.isStreamingV4() && method == "PUT":
- req = s3signer.StreamingSignV4(req, c.accessKeyID,
- c.secretAccessKey, location, metadata.contentLength, time.Now().UTC())
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ case signerType.IsStreamingV4() && method == "PUT":
+ req = s3signer.StreamingSignV4(req, accessKeyID,
+ secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
default:
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
@@ -670,7 +712,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
// Add signature version '4' authorization header.
- req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
}
// Return request.
@@ -701,14 +743,26 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
host = c.s3AccelerateEndpoint
} else {
- // Fetch new host based on the bucket location.
- host = getS3Endpoint(bucketLocation)
+ // Do not change the host if the endpoint URL is a FIPS S3 endpoint.
+ if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
+ // Fetch new host based on the bucket location.
+ host = getS3Endpoint(bucketLocation)
+ }
}
}
// Save scheme.
scheme := c.endpointURL.Scheme
+	// Strip port 80 and 443 so we won't send these ports in the Host header.
+	// The reason is that browsers and curl automatically remove :80 and :443
+	// from generated presigned URLs, which then causes a signature mismatch error.
+ if h, p, err := net.SplitHostPort(host); err == nil {
+ if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
+ host = h
+ }
+ }
+
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
@@ -732,13 +786,16 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
}
}
}
+
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
+
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
+
return u, nil
}
diff --git a/vendor/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
index 7e5933778..e81596ecf 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v2_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
@@ -21,10 +21,13 @@ import (
"errors"
"io"
"io/ioutil"
+ "log"
"math/rand"
"net/http"
"net/url"
"os"
+ "reflect"
+ "strings"
"testing"
"time"
@@ -36,7 +39,7 @@ func TestMakeBucketErrorV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
- if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
t.Skip("skipping region functional tests for non s3 runs")
}
@@ -45,10 +48,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -91,10 +94,10 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -171,10 +174,10 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -229,119 +232,6 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
}
}
-// Tests resumable put object cloud to cloud.
-func TestResumablePutObjectV2(t *testing.T) {
- // By passing 'go test -short' skips these tests.
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Create a temporary file.
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
- // Copy 11MiB worth of random data.
- n, err := io.CopyN(file, r, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Close the file pro-actively for windows.
- if err = file.Close(); err != nil {
- t.Fatal("Error:", err)
- }
-
- // New object name.
- objectName := bucketName + "-resumable"
-
- // Upload the file.
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Get the uploaded object.
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Upload now cloud to cloud.
- n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Get object info.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != objInfo.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
- }
-
- // Remove all temp files, objects and bucket.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-put")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
// Tests FPutObject hidden contentType setting
func TestFPutObjectV2(t *testing.T) {
if testing.Short() {
@@ -353,10 +243,10 @@ func TestFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -491,90 +381,12 @@ func TestFPutObjectV2(t *testing.T) {
}
-// Tests resumable file based put object multipart upload.
-func TestResumableFPutObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
- n, err := io.CopyN(file, r, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- objectName := bucketName + "-resumable"
-
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Close the file pro-actively for windows.
- file.Close()
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
// Tests various bucket supported formats.
func TestMakeBucketRegionsV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
- if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
t.Skip("skipping region functional tests for non s3 runs")
}
@@ -583,10 +395,10 @@ func TestMakeBucketRegionsV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -634,10 +446,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -767,10 +579,10 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -903,10 +715,10 @@ func TestCopyObjectV2(t *testing.T) {
// Instantiate new minio client object
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -948,18 +760,19 @@ func TestCopyObjectV2(t *testing.T) {
len(buf), n)
}
- // Set copy conditions.
- copyConds := CopyConditions{}
- err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ dst, err := NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
if err != nil {
- t.Fatal("Error:", err)
+ t.Fatal(err)
}
- // Copy source.
- copySource := bucketName + "/" + objectName
+ src := NewSourceInfo(bucketName, objectName, nil)
+ err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
// Perform the Copy
- err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ err = c.CopyObject(dst, src)
if err != nil {
t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
}
@@ -1020,10 +833,10 @@ func TestFunctionalV2(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := NewV2(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1297,3 +1110,361 @@ func TestFunctionalV2(t *testing.T) {
t.Fatal("Error: ", err)
}
}
+
+func testComposeObjectErrorCases(c *Client, t *testing.T) {
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Test that more than 10K source objects cannot be
+ // concatenated.
+ srcArr := [10001]SourceInfo{}
+ srcSlice := srcArr[:]
+ dst, err := NewDestinationInfo(bucketName, "object", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := c.ComposeObject(dst, srcSlice); err == nil {
+ t.Fatal("Error was expected.")
+ } else if err.Error() != "There must be as least one and upto 10000 source objects." {
+ t.Fatal("Got unexpected error: ", err)
+ }
+
+ // Create a source with invalid offset spec and check that
+ // error is returned:
+ // 1. Create the source object.
+ const badSrcSize = 5 * 1024 * 1024
+ buf := bytes.Repeat([]byte("1"), badSrcSize)
+ _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // 2. Set invalid range spec on the object (going beyond
+ // object size)
+ badSrc := NewSourceInfo(bucketName, "badObject", nil)
+ err = badSrc.SetRange(1, badSrcSize)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // 3. ComposeObject call should fail.
+ if err := c.ComposeObject(dst, []SourceInfo{badSrc}); err == nil {
+ t.Fatal("Error was expected.")
+ } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
+ t.Fatal("Got unexpected error: ", err)
+ }
+}
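The two failure modes exercised above are the source-count ceiling and segment bounds: compose accepts between 1 and 10000 sources, and a copy segment [start, end] must stay inside the source object, i.e. end <= size-1, which is why SetRange(1, badSrcSize) is rejected. A sketch of that bounds check under those assumptions:

```go
package main

import "fmt"

// validSegment captures the rule the test relies on: for an object of size
// bytes, a copy segment must satisfy 0 <= start <= end <= size-1.
func validSegment(start, end, size int64) bool {
	return start >= 0 && start <= end && end < size
}

func main() {
	const size = 5 * 1024 * 1024
	fmt.Println(validSegment(1, size, size))   // false: end is one past the last byte
	fmt.Println(validSegment(1, size-1, size)) // true
}
```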
+
+// Test expected error cases
+func TestComposeObjectErrorCasesV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ testComposeObjectErrorCases(c, t)
+}
+
+func testComposeMultipleSources(c *Client, t *testing.T) {
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Upload a small source object
+ const srcSize = 1024 * 1024 * 5
+ buf := bytes.Repeat([]byte("1"), srcSize)
+ _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // We will append 10 copies of the object.
+ srcs := []SourceInfo{}
+ for i := 0; i < 10; i++ {
+ srcs = append(srcs, NewSourceInfo(bucketName, "srcObject", nil))
+ }
+ // Make the last part very small: range [0, 0] copies a single byte.
+ err = srcs[9].SetRange(0, 0)
+ if err != nil {
+ t.Fatal("unexpected error:", err)
+ }
+
+ dst, err := NewDestinationInfo(bucketName, "dstObject", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.ComposeObject(dst, srcs)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ objProps, err := c.StatObject(bucketName, "dstObject")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ if objProps.Size != 9*srcSize+1 {
+ t.Fatal("Size mismatched! Expected:", 10000*srcSize, "but got:", objProps.Size)
+ }
+}
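The expected size follows from the source layout: nine full 5 MiB copies plus the tenth trimmed by SetRange(0, 0) to the single byte at offset 0, so the destination should be 9 × 5 × 1024 × 1024 + 1 = 47,185,921 bytes, which is what the 9*srcSize+1 check asserts.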
+
+// Test concatenating multiple source objects
+func TestCompose10KSourcesV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ testComposeMultipleSources(c, t)
+}
+
+func testEncryptedCopyObject(c *Client, t *testing.T) {
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ key1 := NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
+ key2 := NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
+
+ // 1. Create an SSE-C encrypted object to copy, by uploading it
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(map[string][]string)
+ for k, v := range key1.GetSSEHeaders() {
+ metadata[k] = append(metadata[k], v)
+ }
+ _, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil)
+ if err != nil {
+ t.Fatal("PutObjectWithSize Error:", err)
+ }
+
+ // 2. copy object and change encryption key
+ src := NewSourceInfo(bucketName, "srcObject", &key1)
+ dst, err := NewDestinationInfo(bucketName, "dstObject", &key2, nil)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ t.Fatal("CopyObject Error:", err)
+ }
+
+ // 3. get copied object and check if content is equal
+ reqH := NewGetReqHeaders()
+ for k, v := range key2.GetSSEHeaders() {
+ reqH.Set(k, v)
+ }
+ coreClient := Core{c}
+ reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH)
+ if err != nil {
+ t.Fatal("GetObject Error:", err)
+ }
+ defer reader.Close()
+
+ decBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ if !bytes.Equal(decBytes, buf) {
+ log.Fatal("downloaded object mismatched for encrypted object")
+ }
+}
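The helper above rotates an SSE-C key during a server-side copy: the source headers let the server decrypt with key1, and the destination headers re-encrypt with key2. A sketch of the headers such a key expands to, assuming the standard AWS SSE-C header names (the exact output of GetSSEHeaders is not shown in this diff):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// sseHeaders derives the customer-key headers from a raw 32-byte AES-256 key:
// the algorithm, the base64-encoded key, and a base64 MD5 digest for integrity.
func sseHeaders(key []byte) map[string]string {
	sum := md5.Sum(key)
	return map[string]string{
		"X-Amz-Server-Side-Encryption-Customer-Algorithm": "AES256",
		"X-Amz-Server-Side-Encryption-Customer-Key":       base64.StdEncoding.EncodeToString(key),
		"X-Amz-Server-Side-Encryption-Customer-Key-Md5":   base64.StdEncoding.EncodeToString(sum[:]),
	}
}

func main() {
	for k, v := range sseHeaders([]byte("32byteslongsecretkeymustbegiven1")) {
		fmt.Println(k+":", v)
	}
}
```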
+
+// Test encrypted copy object
+func TestEncryptedCopyObjectV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ testEncryptedCopyObject(c, t)
+}
+
+func testUserMetadataCopying(c *Client, t *testing.T) {
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(bucketName, object)
+ if err != nil {
+ t.Fatal("Metadata fetch error:", err)
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ // 1. Create an object with user metadata to serve as the copy source
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(http.Header)
+ metadata.Set("x-amz-meta-myheader", "myvalue")
+ _, err = c.PutObjectWithMetadata(bucketName, "srcObject",
+ bytes.NewReader(buf), metadata, nil)
+ if err != nil {
+ t.Fatal("Put Error:", err)
+ }
+ if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+ t.Fatal("Unequal metadata")
+ }
+
+ // 2. create source
+ src := NewSourceInfo(bucketName, "srcObject", nil)
+ // 2.1 create destination with metadata set
+ dst1, err := NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // 3. Check that copying to an object with metadata set resets
+ // the headers on the copy.
+ err = c.CopyObject(dst1, src)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ expectedHeaders := make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+ t.Fatal("Unequal metadata")
+ }
+
+ // 4. create destination with no metadata set and same source
+ dst2, err := NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
+ if err != nil {
+ t.Fatal("Error:", err)
+
+ }
+ src = NewSourceInfo(bucketName, "srcObject", nil)
+
+ // 5. Check that copying to an object with no metadata set,
+ // copies metadata.
+ err = c.CopyObject(dst2, src)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ expectedHeaders = metadata
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+ t.Fatal("Unequal metadata")
+ }
+
+ // 6. Compose a pair of sources.
+ srcs := []SourceInfo{
+ NewSourceInfo(bucketName, "srcObject", nil),
+ NewSourceInfo(bucketName, "srcObject", nil),
+ }
+ dst3, err := NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
+ if err != nil {
+ t.Fatal("Error:", err)
+
+ }
+
+ err = c.ComposeObject(dst3, srcs)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Check that no headers are copied in this case
+ if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+ t.Fatal("Unequal metadata")
+ }
+
+ // 7. Compose a pair of sources with dest user metadata set.
+ srcs = []SourceInfo{
+ NewSourceInfo(bucketName, "srcObject", nil),
+ NewSourceInfo(bucketName, "srcObject", nil),
+ }
+ dst4, err := NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
+ if err != nil {
+ t.Fatal("Error:", err)
+
+ }
+
+ err = c.ComposeObject(dst4, srcs)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Check that no headers are copied in this case
+ expectedHeaders = make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+ t.Fatal("Unequal metadata")
+ }
+}
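The seven steps above pin down the metadata rule for the new copy API: supplying user metadata on the destination replaces whatever the source carries, a nil map copies the source metadata verbatim for single-source copies, and composed objects start from empty metadata unless the destination sets some. A sketch of that decision, assuming it maps onto S3's x-amz-metadata-directive:

```go
package main

import "fmt"

// metadataDirective is illustrative only: an explicit destination metadata
// map switches the server-side copy to REPLACE; nil means COPY from source.
func metadataDirective(destMeta map[string]string) string {
	if len(destMeta) > 0 {
		return "REPLACE"
	}
	return "COPY"
}

func main() {
	fmt.Println(metadataDirective(nil))                         // COPY
	fmt.Println(metadataDirective(map[string]string{"k": "v"})) // REPLACE
}
```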
+
+func TestUserMetadataCopyingV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopying(c, t)
+}
diff --git a/vendor/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
index b5e6d128a..e9593ddaf 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v4_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
@@ -18,7 +18,6 @@ package minio
import (
"bytes"
- crand "crypto/rand"
"encoding/hex"
"errors"
"fmt"
@@ -67,7 +66,7 @@ func TestMakeBucketError(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
- if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
t.Skip("skipping region functional tests for non s3 runs")
}
@@ -76,10 +75,10 @@ func TestMakeBucketError(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -109,6 +108,20 @@ func TestMakeBucketError(t *testing.T) {
if err = c.RemoveBucket(bucketName); err != nil {
t.Fatal("Error:", err, bucketName)
}
+ if err = c.MakeBucket(bucketName+"..-1", "eu-central-1"); err == nil {
+ t.Fatal("Error:", err, bucketName+"..-1")
+ }
+ // Verify valid error response.
+ if err != nil && err.Error() != "Bucket name contains invalid characters" {
+ t.Fatal("Error: Invalid error returned by server", err)
+ }
+ if err = c.MakeBucket(bucketName+"AAA-1", "eu-central-1"); err == nil {
+ t.Fatal("Error:", err, bucketName+"..-1")
+ }
+ // Verify valid error response.
+ if err != nil && err.Error() != "Bucket name contains invalid characters" {
+ t.Fatal("Error: Invalid error returned by server", err)
+ }
}
// Tests various bucket supported formats.
@@ -116,7 +129,7 @@ func TestMakeBucketRegions(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
- if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
t.Skip("skipping region functional tests for non s3 runs")
}
@@ -125,10 +138,10 @@ func TestMakeBucketRegions(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -176,10 +189,10 @@ func TestPutObjectReadAt(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -201,14 +214,10 @@ func TestPutObjectReadAt(t *testing.T) {
}
// Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
- buf := make([]byte, minPartSize*4)
- // Use crand.Reader for multipart tests to ensure part order at the end.
- size, err := io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ // Use different data for each part for multipart tests to ensure part order at the end.
+ var buf []byte
+ for i := 0; i < 4; i++ {
+ buf = append(buf, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
}
// Save the data
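The switch away from crypto/rand above is deliberate: with a distinct repeated letter per minPartSize-sized part, any server-side part reordering changes the downloaded bytes and fails the later comparison, without the cost of buffering random data. A tiny sketch of the scheme, shrunk to a 4-byte part size for illustration:

```go
package main

import (
	"bytes"
	"fmt"
)

// partData builds n parts where part i is the letter 'a'+i repeated, so the
// concatenation order is visible in the bytes themselves.
func partData(parts, partSize int) []byte {
	var buf []byte
	for i := 0; i < parts; i++ {
		buf = append(buf, bytes.Repeat([]byte{byte('a' + i)}, partSize)...)
	}
	return buf
}

func main() {
	fmt.Println(string(partData(4, 4))) // aaaabbbbccccdddd
}
```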
@@ -270,10 +279,10 @@ func TestPutObjectWithMetadata(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -295,14 +304,10 @@ func TestPutObjectWithMetadata(t *testing.T) {
}
// Generate data using 2 parts
- buf := make([]byte, minPartSize*2)
- // Use crand.Reader for multipart tests to ensure part order at the end.
- size, err := io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*2 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, size)
+ // Use different data in each part for multipart tests to ensure part order at the end.
+ var buf []byte
+ for i := 0; i < 2; i++ {
+ buf = append(buf, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
}
// Save the data
@@ -311,7 +316,9 @@ func TestPutObjectWithMetadata(t *testing.T) {
// Object custom metadata
customContentType := "custom/contenttype"
- n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{"Content-Type": {customContentType}}, nil)
+ n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{
+ "Content-Type": {customContentType},
+ }, nil)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -366,10 +373,10 @@ func TestPutObjectStreaming(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV4(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -419,8 +426,8 @@ func TestPutObjectStreaming(t *testing.T) {
}
}
-// Test listing partially uploaded objects.
-func TestListPartiallyUploaded(t *testing.T) {
+// Test that no partially uploaded objects are listed after a PutObject error.
+func TestListNoPartiallyUploadedObjects(t *testing.T) {
if testing.Short() {
t.Skip("skipping function tests for short runs")
}
@@ -430,10 +437,10 @@ func TestListPartiallyUploaded(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -475,18 +482,25 @@ func TestListPartiallyUploaded(t *testing.T) {
if err == nil {
t.Fatal("Error: PutObject should fail.")
}
- if err.Error() != "proactively closed to be verified later" {
+ if !strings.Contains(err.Error(), "proactively closed to be verified later") {
t.Fatal("Error:", err)
}
doneCh := make(chan struct{})
defer close(doneCh)
+
isRecursive := true
multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
+
+ var activeUploads bool
for multiPartObject := range multiPartObjectCh {
if multiPartObject.Err != nil {
t.Fatalf("Error: Error when listing incomplete upload")
}
+ activeUploads = true
+ }
+ if activeUploads {
+ t.Errorf("There should be no active uploads in progress upon error for %s/%s", bucketName, objectName)
}
err = c.RemoveBucket(bucketName)
@@ -506,10 +520,10 @@ func TestGetOjectSeekEnd(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -601,10 +615,10 @@ func TestGetObjectClosedTwice(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -681,10 +695,10 @@ func TestRemoveMultipleObjects(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
@@ -745,282 +759,6 @@ func TestRemoveMultipleObjects(t *testing.T) {
}
}
-// Tests removing partially uploaded objects.
-func TestRemovePartiallyUploaded(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
-
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, cerr := io.CopyN(writer, r, 128*1024)
- if cerr != nil {
- t.Fatal("Error:", cerr, bucketName)
- }
- i++
- r.Seek(0, 0)
- }
- writer.CloseWithError(errors.New("proactively closed to be verified later"))
- }()
-
- objectName := bucketName + "-resumable"
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
- if err == nil {
- t.Fatal("Error: PutObject should fail.")
- }
- if err.Error() != "proactively closed to be verified later" {
- t.Fatal("Error:", err)
- }
- err = c.RemoveIncompleteUpload(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests resumable put object cloud to cloud.
-func TestResumablePutObject(t *testing.T) {
- // By passing 'go test -short' skips these tests.
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Create a temporary file.
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
- r := bytes.NewReader(bytes.Repeat([]byte("b"), minPartSize*2))
- // Copy 11MiB worth of random data.
- n, err := io.CopyN(file, r, minPartSize*2)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*2) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
- }
-
- // Close the file pro-actively for windows.
- if err = file.Close(); err != nil {
- t.Fatal("Error:", err)
- }
-
- // New object name.
- objectName := bucketName + "-resumable"
- objectContentType := "application/custom-octet-stream"
-
- // Upload the file.
- n, err = c.FPutObject(bucketName, objectName, file.Name(), objectContentType)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*2) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
- }
-
- // Get the uploaded object.
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Get object info.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- if objInfo.ContentType != objectContentType {
- t.Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
- }
-
- // Upload now cloud to cloud.
- n, err = c.PutObject(bucketName, objectName+"-put", reader, objectContentType)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- if n != objInfo.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
- }
-
- // Remove all temp files, objects and bucket.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-put")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests resumable file based put object multipart upload.
-func TestResumableFPutObject(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Upload 4 parts to use all 3 multipart 'workers' and have an extra part.
- buffer := make([]byte, minPartSize*4)
- // Use crand.Reader for multipart tests to ensure parts are uploaded in correct order.
- size, err := io.ReadFull(crand.Reader, buffer)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
- }
- size, err = file.Write(buffer)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
- }
-
- // Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- objectName := bucketName + "-resumable"
-
- n, err := c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*4) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
// Tests FPutObject of a big file to trigger multipart
func TestFPutObjectMultipart(t *testing.T) {
if testing.Short() {
@@ -1032,10 +770,10 @@ func TestFPutObjectMultipart(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1063,16 +801,12 @@ func TestFPutObjectMultipart(t *testing.T) {
}
// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- buffer := make([]byte, minPartSize*4)
-
- size, err := io.ReadFull(crand.Reader, buffer)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ var buffer []byte
+ for i := 0; i < 4; i++ {
+ buffer = append(buffer, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
}
- size, err = file.Write(buffer)
+
+ size, err := file.Write(buffer)
if err != nil {
t.Fatal("Error:", err)
}
@@ -1137,10 +871,10 @@ func TestFPutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1168,18 +902,14 @@ func TestFPutObject(t *testing.T) {
}
// Upload 4 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
- buffer := make([]byte, minPartSize*4)
- // Use random data for multipart tests to check parts are uploaded in correct order.
- size, err := io.ReadFull(crand.Reader, buffer)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
+ // Use different data in each part for multipart tests to check that parts are uploaded in the correct order.
+ var buffer []byte
+ for i := 0; i < 4; i++ {
+ buffer = append(buffer, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
}
// Write the data to the file.
- size, err = file.Write(buffer)
+ size, err := file.Write(buffer)
if err != nil {
t.Fatal("Error:", err)
}
@@ -1297,10 +1027,10 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1451,10 +1181,10 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1600,10 +1330,10 @@ func TestPresignedPostPolicy(t *testing.T) {
// Instantiate new minio client object
c, err := NewV4(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1695,10 +1425,10 @@ func TestCopyObject(t *testing.T) {
// Instantiate new minio client object
c, err := NewV4(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1750,41 +1480,45 @@ func TestCopyObject(t *testing.T) {
t.Fatal("Error:", err)
}
+ // Copy Source
+ src := NewSourceInfo(bucketName, objectName, nil)
+
// Set copy conditions.
- copyConds := CopyConditions{}
- // Start by setting wrong conditions
- err = copyConds.SetModified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ // All invalid conditions first.
+ err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
t.Fatal("Error:", err)
}
- err = copyConds.SetUnmodified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
t.Fatal("Error:", err)
}
- err = copyConds.SetMatchETag("")
+ err = src.SetMatchETagCond("")
if err == nil {
t.Fatal("Error:", err)
}
- err = copyConds.SetMatchETagExcept("")
+ err = src.SetMatchETagExceptCond("")
if err == nil {
t.Fatal("Error:", err)
}
- err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal("Error:", err)
}
- err = copyConds.SetMatchETag(objInfo.ETag)
+ err = src.SetMatchETagCond(objInfo.ETag)
if err != nil {
t.Fatal("Error:", err)
}
- // Copy source.
- copySource := bucketName + "/" + objectName
+ dst, err := NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
// Perform the Copy
- err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ err = c.CopyObject(dst, src)
if err != nil {
t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
}
@@ -1814,18 +1548,18 @@ func TestCopyObject(t *testing.T) {
}
// CopyObject again but with wrong conditions
- copyConds = CopyConditions{}
- err = copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ src = NewSourceInfo(bucketName, objectName, nil)
+ err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal("Error:", err)
}
- err = copyConds.SetMatchETagExcept(objInfo.ETag)
+ err = src.SetMatchETagExceptCond(objInfo.ETag)
if err != nil {
t.Fatal("Error:", err)
}
// Perform the Copy which should fail
- err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ err = c.CopyObject(dst, src)
if err == nil {
t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy should fail")
}
@@ -1863,10 +1597,10 @@ func TestEncryptionPutGet(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1983,6 +1717,7 @@ func TestEncryptionPutGet(t *testing.T) {
if err != nil {
t.Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
}
+ defer r.Close()
// Compare the sent object with the received one
recvBuffer := bytes.NewBuffer([]byte{})
@@ -2029,10 +1764,10 @@ func TestBucketNotification(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -2105,10 +1840,10 @@ func TestFunctional(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := New(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -2433,10 +2168,10 @@ func TestGetObjectObjectModified(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV4(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -2498,3 +2233,178 @@ func TestGetObjectObjectModified(t *testing.T) {
t.Errorf("Expected ReadAt to fail with error %s but received %s", s3ErrorResponseMap["PreconditionFailed"], err.Error())
}
}
+
+// Test validates that PutObject uploads a file starting from its current seek offset.
+func TestPutObjectUploadSeekedObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object.
+ c, err := NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+ defer c.RemoveBucket(bucketName)
+
+ tempfile, err := ioutil.TempFile("", "minio-go-upload-test-")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ var length = 120000
+ data := bytes.Repeat([]byte("1"), length)
+
+ if _, err = tempfile.Write(data); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+
+ offset := length / 2
+ if _, err := tempfile.Seek(int64(offset), 0); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(length-offset) {
+ t.Fatalf("Invalid length returned, want %v, got %v", int64(length-offset), n)
+ }
+ tempfile.Close()
+ if err = os.Remove(tempfile.Name()); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ length = int(n)
+
+ obj, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ n, err = obj.Seek(int64(offset), 0)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(offset) {
+ t.Fatalf("Invalid offset returned, want %v, got %v", int64(offset), n)
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(length-offset) {
+ t.Fatalf("Invalid length returned, want %v, got %v", int64(length-offset), n)
+ }
+
+ if err = c.RemoveObject(bucketName, objectName); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Test expected error cases
+func TestComposeObjectErrorCases(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ testComposeObjectErrorCases(c, t)
+}
+
+// Test concatenating 10K objects
+func TestCompose10KSources(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ testComposeMultipleSources(c, t)
+}
+
+// Test encrypted copy object
+func TestEncryptedCopyObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObject(c, t)
+}
+
+func TestUserMetadataCopying(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Instantiate new minio client object
+ c, err := NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopying(c, t)
+}
diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go
index c1db0df5d..2a9db3cb6 100644
--- a/vendor/github.com/minio/minio-go/api_unit_test.go
+++ b/vendor/github.com/minio/minio-go/api_unit_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,10 +22,12 @@ import (
"io"
"io/ioutil"
"net/http"
+ "net/url"
"os"
"strings"
"testing"
+ "github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/policy"
)
@@ -179,27 +182,6 @@ func TestValidBucketLocation(t *testing.T) {
}
}
-// Tests temp file.
-func TestTempFile(t *testing.T) {
- tmpFile, err := newTempFile("testing")
- if err != nil {
- t.Fatal("Error:", err)
- }
- fileName := tmpFile.Name()
- // Closing temporary file purges the file.
- err = tmpFile.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
- st, err := os.Stat(fileName)
- if err != nil && !os.IsNotExist(err) {
- t.Fatal("Error:", err)
- }
- if err == nil && st != nil {
- t.Fatal("Error: file should be deleted and should not exist.")
- }
-}
-
// Tests error response structure.
func TestErrorResponse(t *testing.T) {
var err error
@@ -228,18 +210,18 @@ func TestErrorResponse(t *testing.T) {
// Tests signature type.
func TestSignatureType(t *testing.T) {
clnt := Client{}
- if !clnt.signature.isV4() {
+ if !clnt.overrideSignerType.IsV4() {
t.Fatal("Error")
}
- clnt.signature = SignatureV2
- if !clnt.signature.isV2() {
+ clnt.overrideSignerType = credentials.SignatureV2
+ if !clnt.overrideSignerType.IsV2() {
t.Fatal("Error")
}
- if clnt.signature.isV4() {
+ if clnt.overrideSignerType.IsV4() {
t.Fatal("Error")
}
- clnt.signature = SignatureV4
- if !clnt.signature.isV4() {
+ clnt.overrideSignerType = credentials.SignatureV4
+ if !clnt.overrideSignerType.IsV4() {
t.Fatal("Error")
}
}
@@ -300,3 +282,56 @@ func TestPartSize(t *testing.T) {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
}
}
+
+// TestMakeTargetURL - testing makeTargetURL()
+func TestMakeTargetURL(t *testing.T) {
+ testCases := []struct {
+ addr string
+ secure bool
+ bucketName string
+ objectName string
+ bucketLocation string
+ queryValues map[string][]string
+ expectedURL url.URL
+ expectedErr error
+ }{
+ // Test 1
+ {"localhost:9000", false, "", "", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/"}, nil},
+ // Test 2
+ {"localhost", true, "", "", "", nil, url.URL{Host: "localhost", Scheme: "https", Path: "/"}, nil},
+ // Test 3
+ {"localhost:9000", true, "mybucket", "", "", nil, url.URL{Host: "localhost:9000", Scheme: "https", Path: "/mybucket/"}, nil},
+ // Test 4, testing against google storage API
+ {"storage.googleapis.com", true, "mybucket", "", "", nil, url.URL{Host: "mybucket.storage.googleapis.com", Scheme: "https", Path: "/"}, nil},
+ // Test 5, testing against AWS S3 API
+ {"s3.amazonaws.com", true, "mybucket", "myobject", "", nil, url.URL{Host: "mybucket.s3.amazonaws.com", Scheme: "https", Path: "/myobject"}, nil},
+ // Test 6
+ {"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil},
+ // Test 7, testing with query
+ {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": []string{"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
+ // Test 8, testing with port 80
+ {"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil},
+ // Test 9, testing with port 443
+ {"localhost:443", true, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "https", Path: "/mybucket/myobject"}, nil},
+ }
+
+ for i, testCase := range testCases {
+ // Initialize a Minio client
+ c, _ := New(testCase.addr, "foo", "bar", testCase.secure)
+ u, err := c.makeTargetURL(testCase.bucketName, testCase.objectName, testCase.bucketLocation, testCase.queryValues)
+ // Check the returned error
+ if testCase.expectedErr == nil && err != nil {
+ t.Fatalf("Test %d: Should succeed but failed with err = %v", i+1, err)
+ }
+ if testCase.expectedErr != nil && err == nil {
+ t.Fatalf("Test %d: Should fail but succeeded", i+1)
+ }
+ if err == nil {
+ // Check if the returned url is equal to what we expect
+ if u.String() != testCase.expectedURL.String() {
+ t.Fatalf("Test %d: Mismatched target url: expected = `%v`, found = `%v`",
+ i+1, testCase.expectedURL.String(), u.String())
+ }
+ }
+ }
+}
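The table above also encodes the virtual-host rule: for known cloud endpoints (s3.amazonaws.com, storage.googleapis.com) the bucket moves into the Host, while generic endpoints such as localhost keep path-style URLs, and default ports are stripped per the earlier hunk. A reduced sketch of the host selection, with an assumed virtual-host predicate:

```go
package main

import "fmt"

// targetHost is illustrative only: virtual-hosted style puts the bucket in
// the Host, path-style leaves the endpoint untouched.
func targetHost(endpoint, bucket string, virtualHostStyle bool) string {
	if virtualHostStyle && bucket != "" {
		return bucket + "." + endpoint
	}
	return endpoint
}

func main() {
	fmt.Println(targetHost("s3.amazonaws.com", "mybucket", true)) // mybucket.s3.amazonaws.com
	fmt.Println(targetHost("localhost:9000", "mybucket", false))  // localhost:9000
}
```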
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
index be746a7bf..4f5c1b390 100644
--- a/vendor/github.com/minio/minio-go/appveyor.yml
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -17,6 +17,8 @@ install:
- go version
- go env
- go get -u github.com/golang/lint/golint
+ - go get -u github.com/go-ini/ini
+ - go get -u github.com/minio/go-homedir
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 28799c69d..6d2a40f78 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +24,7 @@ import (
"path"
"sync"
+ "github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -71,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) {
// GetBucketLocation - get location for the bucket name from location cache, if not
// fetch freshly by making a new request.
func (c Client) GetBucketLocation(bucketName string) (string, error) {
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
return c.getBucketLocation(bucketName)
@@ -80,21 +82,27 @@ func (c Client) GetBucketLocation(bucketName string) (string, error) {
// getBucketLocation - Get location for the bucketName from location map cache, if not
// fetch freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
+ // If the region is already set, there is no need to fetch the bucket location.
+ if c.region != "" {
+ return c.region, nil
+ }
+
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
// China region.
return "cn-north-1", nil
- }
-
- // Region set then no need to fetch bucket location.
- if c.region != "" {
- return c.region, nil
+ } else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
+ // For us-gov specifically we need to set everything to
+ // us-gov-west-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // Gov cloud region.
+ return "us-gov-west-1", nil
}
if location, ok := c.bucketLocCache.Get(bucketName); ok {
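The reordering above fixes the lookup precedence: an explicitly configured client region now wins over the endpoint special cases, followed by the China and GovCloud pins, the location cache, and only then a live location request. A condensed sketch of that order, with simplified types standing in for the client fields:

```go
package main

import "fmt"

// resolveRegion sketches the precedence; the empty return means "fall
// through to a live GetBucketLocation request".
func resolveRegion(configured string, isChina, isGovCloud bool, cache map[string]string, bucket string) string {
	if configured != "" {
		return configured
	}
	if isChina {
		return "cn-north-1"
	}
	if isGovCloud {
		return "us-gov-west-1"
	}
	if loc, ok := cache[bucket]; ok {
		return loc
	}
	return ""
}

func main() {
	fmt.Println(resolveRegion("", false, true, nil, "bucket")) // us-gov-west-1
}
```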
@@ -181,8 +189,33 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Set UserAgent for the request.
c.setUserAgent(req)
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
// Set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
+ switch {
+ case signerType.IsV4():
var contentSha256 string
if c.secure {
contentSha256 = unsignedPayload
@@ -190,13 +223,10 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
+ case signerType.IsV2():
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
}
- // Sign the request.
- if c.signature.isV4() {
- req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
- }
return req, nil
}
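
With the region short-circuit above, a client constructed with an explicit region never performs the bucket-location round trip at all. A minimal sketch of the caller side (endpoint, keys and bucket name are placeholders; `NewWithRegion` is assumed to be the region-pinning constructor this version of the client exposes):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Pinning the region up front means getBucketLocation answers from
	// c.region and never issues a GetBucketLocation request.
	s3Client, err := minio.NewWithRegion("s3.amazonaws.com",
		"YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true, "us-west-2")
	if err != nil {
		log.Fatalln(err)
	}

	// Served from the preset region, no network round trip.
	location, err := s3Client.GetBucketLocation("my-bucketname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket location:", location)
}
```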
diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go
index 0c068c966..6ae4e7be4 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache_test.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,6 +28,7 @@ import (
"reflect"
"testing"
+ "github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
)
@@ -86,17 +88,46 @@ func TestGetBucketLocationRequest(t *testing.T) {
// Set UserAgent for the request.
c.setUserAgent(req)
- // Set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
}
- // Sign the request.
- if c.signature.isV4() {
- req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
}
+
+ // Set sha256 sum for signature calculation only
+ // with signature version '4'.
+ switch {
+ case signerType.IsV4():
+ var contentSha256 string
+ if c.secure {
+ contentSha256 = unsignedPayload
+ } else {
+ contentSha256 = hex.EncodeToString(sum256([]byte{}))
+ }
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
+ case signerType.IsV2():
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ }
+
return req, nil
}
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index 6055bfdad..9771d2f92 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -18,10 +18,18 @@ package minio
/// Multipart upload defaults.
-// miniPartSize - minimum part size 64MiB per object after which
+// absMinPartSize - absolute minimum part size (5 MiB) below which
+// a part in a multipart upload may not be uploaded.
+const absMinPartSize = 1024 * 1024 * 5
+
+// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
+// copyPartSize - default (and maximum) part size to copy in a
+// copy-object request (5GiB)
+const copyPartSize = 1024 * 1024 * 1024 * 5
+
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
@@ -37,10 +45,6 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
-// optimalReadBufferSize - optimal buffer 5MiB used for reading
-// through Read operation.
-const optimalReadBufferSize = 1024 * 1024 * 5
-
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
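
Taken together, these constants bound the part-size arithmetic: at most 10000 parts per upload means a full 5 TiB object needs parts of roughly 525 MiB, while anything at or below 64 MiB stays a single PUT. A small illustrative sketch of that constraint (not the library's internal sizing function, which additionally rounds part sizes up to multiples of minPartSize):

```go
package main

import "fmt"

const (
	minPartSize   = 1024 * 1024 * 64 // 64 MiB
	maxPartsCount = 10000            // parts per multipart session
)

// partSizeFor sketches the constraint the constants impose: the object
// must fit in maxPartsCount parts, and no part may be smaller than
// minPartSize.
func partSizeFor(objectSize int64) int64 {
	partSize := (objectSize + maxPartsCount - 1) / maxPartsCount // ceil division
	if partSize < minPartSize {
		partSize = minPartSize
	}
	return partSize
}

func main() {
	const fiveTiB = int64(1024) * 1024 * 1024 * 1024 * 5
	fmt.Println(partSizeFor(fiveTiB)) // ≈ 550 MB parts (about 525 MiB)
}
```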
diff --git a/vendor/github.com/minio/minio-go/copy-conditions.go b/vendor/github.com/minio/minio-go/copy-conditions.go
deleted file mode 100644
index 65018aa09..000000000
--- a/vendor/github.com/minio/minio-go/copy-conditions.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "net/http"
- "time"
-)
-
-// copyCondition explanation:
-// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
-//
-// Example:
-//
-// copyCondition {
-// key: "x-amz-copy-if-modified-since",
-// value: "Tue, 15 Nov 1994 12:45:26 GMT",
-// }
-//
-type copyCondition struct {
- key string
- value string
-}
-
-// CopyConditions - copy conditions.
-type CopyConditions struct {
- conditions []copyCondition
-}
-
-// NewCopyConditions - Instantiate new list of conditions. This
-// function is left behind for backward compatibility. The idiomatic
-// way to set an empty set of copy conditions is,
-// ``copyConditions := CopyConditions{}``.
-//
-func NewCopyConditions() CopyConditions {
- return CopyConditions{}
-}
-
-// SetMatchETag - set match etag.
-func (c *CopyConditions) SetMatchETag(etag string) error {
- if etag == "" {
- return ErrInvalidArgument("ETag cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-match",
- value: etag,
- })
- return nil
-}
-
-// SetMatchETagExcept - set match etag except.
-func (c *CopyConditions) SetMatchETagExcept(etag string) error {
- if etag == "" {
- return ErrInvalidArgument("ETag cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-none-match",
- value: etag,
- })
- return nil
-}
-
-// SetUnmodified - set unmodified time since.
-func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
- if modTime.IsZero() {
- return ErrInvalidArgument("Modified since cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-unmodified-since",
- value: modTime.Format(http.TimeFormat),
- })
- return nil
-}
-
-// SetModified - set modified time since.
-func (c *CopyConditions) SetModified(modTime time.Time) error {
- if modTime.IsZero() {
- return ErrInvalidArgument("Modified since cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-modified-since",
- value: modTime.Format(http.TimeFormat),
- })
- return nil
-}
diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go
index be9388cec..4b1054a69 100644
--- a/vendor/github.com/minio/minio-go/core.go
+++ b/vendor/github.com/minio/minio-go/core.go
@@ -70,7 +70,13 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
- return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)
+ return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil)
+}
+
+// PutObjectPartWithMetadata - upload an object part with additional request metadata.
+func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int,
+ size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) {
+ return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
@@ -80,7 +86,9 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
- _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})
+ _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{
+ Parts: parts,
+ })
return err
}
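
A hedged sketch of how the new PutObjectPartWithMetadata hook fits into a manual multipart upload driven through Core (bucket and object names are placeholders, the SSE-C header set is deliberately incomplete, and Core's NewMultipartUpload initiator is assumed to carry the signature used below):

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("s3.amazonaws.com",
		"YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	uploadID, err := core.NewMultipartUpload("my-bucketname", "my-objectname", nil)
	if err != nil {
		log.Fatalln(err)
	}

	// One minimum-size (64 MiB) part of repeated data.
	buf := bytes.Repeat([]byte("a"), 1024*1024*64)

	// Per-part request metadata; for SSE-C every part must repeat the
	// customer key headers (key and key-MD5 headers elided here).
	metadata := map[string][]string{
		"x-amz-server-side-encryption-customer-algorithm": {"AES256"},
	}
	part, err := core.PutObjectPartWithMetadata("my-bucketname", "my-objectname",
		uploadID, 1, int64(len(buf)), bytes.NewReader(buf), nil, nil, metadata)
	if err != nil {
		log.Fatalln(err)
	}

	err = core.CompleteMultipartUpload("my-bucketname", "my-objectname", uploadID,
		[]minio.CompletePart{{PartNumber: part.PartNumber, ETag: part.ETag}})
	if err != nil {
		log.Fatalln(err)
	}
}
```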
diff --git a/vendor/github.com/minio/minio-go/core_test.go b/vendor/github.com/minio/minio-go/core_test.go
index c7c73d4c7..81e1cd5bf 100644
--- a/vendor/github.com/minio/minio-go/core_test.go
+++ b/vendor/github.com/minio/minio-go/core_test.go
@@ -19,7 +19,6 @@ package minio
import (
"bytes"
"crypto/md5"
- crand "crypto/rand"
"io"
"math/rand"
@@ -29,6 +28,13 @@ import (
"time"
)
+const (
+ serverEndpoint = "SERVER_ENDPOINT"
+ accessKey = "ACCESS_KEY"
+ secretKey = "SECRET_KEY"
+ enableSecurity = "ENABLE_HTTPS"
+)
+
// Tests for Core GetObject() function.
func TestGetObjectCore(t *testing.T) {
if testing.Short() {
@@ -40,10 +46,10 @@ func TestGetObjectCore(t *testing.T) {
// Instantiate new minio core client object.
c, err := NewCore(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -214,10 +220,10 @@ func TestGetBucketPolicy(t *testing.T) {
// Instantiate new minio client object.
c, err := NewCore(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -277,10 +283,10 @@ func TestCorePutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := NewCore(
- os.Getenv("S3_ADDRESS"),
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- mustParseBool(os.Getenv("S3_SECURE")),
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
@@ -301,15 +307,7 @@ func TestCorePutObject(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
- buf := make([]byte, minPartSize)
-
- size, err := io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize, size)
- }
+ buf := bytes.Repeat([]byte("a"), minPartSize)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
index 06735427e..e0d0a11e6 100644
--- a/vendor/github.com/minio/minio-go/docs/API.md
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -50,17 +50,21 @@ func main() {
}
```
-| Bucket operations |Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
-|:---|:---|:---|:---|:---|:---|
-|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
-|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
-|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
-| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
-|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
-|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) |
-| | [`FPutObject`](#FPutObject) | | | |
-| | [`FGetObject`](#FGetObject) | | | |
+| Bucket operations | Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
+| :--- | :--- | :--- | :--- | :--- | :--- |
+| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
+| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
+| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`NewSSEInfo`](#NewSSEInfo) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
+| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
+| | [`FPutObject`](#FPutObject) | | | | |
+| | [`FGetObject`](#FGetObject) | | | | |
+| | [`ComposeObject`](#ComposeObject) | | | | |
+| | [`NewSourceInfo`](#NewSourceInfo) | | | | |
+| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | |
+
## 1. Constructor
<a name="Minio"></a>
@@ -438,9 +442,6 @@ if err != nil {
Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
-In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
-
-
__Parameters__
@@ -505,9 +506,11 @@ if err != nil {
<a name="CopyObject"></a>
-### CopyObject(bucketName, objectName, objectSource string, conditions CopyConditions) error
+### CopyObject(dst DestinationInfo, src SourceInfo) error
-Copy a source object into a new object with the provided name in the provided bucket.
+Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object, and server-side encryption of the destination and decryption of the source. See the `SourceInfo` and `DestinationInfo` types for further details.
+
+To copy multiple source objects into a single destination object see the `ComposeObject` API.
__Parameters__
@@ -515,50 +518,161 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |Name of the object |
-|`objectSource` | _string_ |Name of the source object |
-|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`]|
+|`dst` | _DestinationInfo_ |Argument describing the destination object |
+|`src` | _SourceInfo_ |Argument describing the source object |
__Example__
```go
-// Use-case-1
-// To copy an existing object to a new object with _no_ copy conditions.
-copyConds := minio.CopyConditions{}
-err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
+// Use-case 1: Simple copy object with no conditions, etc
+// Source object
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+
+// Destination object
+dst := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+
+// Copy object call
+err = s3Client.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
}
-// Use-case-2
-// To copy an existing object to a new object with the following copy conditions
+// Use-case 2: Copy object with copy-conditions, and copying only part of the source object.
// 1. that matches a given ETag
// 2. and modified after 1st April 2014
// 3. but unmodified since 23rd April 2014
+// 4. copy only first 1MiB of object.
+
+// Source object
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
-// Initialize empty copy conditions.
-var copyConds = minio.CopyConditions{}
+// Set matching ETag condition, copy object which matches the following ETag.
+src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
-// copy object that matches the given ETag.
-copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+// Set modified condition, copy object modified since 2014 April 1.
+src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
-// and modified after 1st April 2014
-copyConds.SetModified(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
+// Set unmodified condition, copy object unmodified since 2014 April 23.
+src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
-// but unmodified since 23rd April 2014
-copyConds.SetUnmodified(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
+// Set copy-range of only first 1MiB of file.
+src.SetRange(0, 1024*1024-1)
-err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
+// Destination object
+dst := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+
+// Copy object call
+err = s3Client.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
}
```
+<a name="ComposeObject"></a>
+### ComposeObject(dst DestinationInfo, srcs []SourceInfo) error
+
+Create an object by concatenating a list of source objects using
+server-side copying.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---|:---|
+|`dst` | _minio.DestinationInfo_ |Struct with info about the object to be created. |
+|`srcs` | _[]minio.SourceInfo_ |Slice of struct with info about source objects to be concatenated in order. |
+
+
+__Example__
+
+
+```go
+// Prepare source decryption key (here we assume same key to
+// decrypt all source objects.)
+decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
+
+// Source objects to concatenate. We also specify decryption
+// key for each
+src1 := minio.NewSourceInfo("bucket1", "object1", decKey)
+src1.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+
+src2 := minio.NewSourceInfo("bucket2", "object2", decKey)
+src2.SetMatchETag("f8ef9c385918b653a31624deb84149d2")
+
+src3 := minio.NewSourceInfo("bucket3", "object3", decKey)
+src3.SetMatchETag("5918b653a31624deb84149d2f8ef9c38")
+
+// Create slice of sources.
+srcs := []minio.SourceInfo{src1, src2, src3}
+
+// Prepare destination encryption key
+encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
+
+// Create destination info
+dst := minio.NewDestinationInfo("bucket", "object", encKey, nil)
+err = s3Client.ComposeObject(dst, srcs)
+if err != nil {
+ log.Println(err)
+ return
+}
+
+log.Println("Composed object successfully.")
+```
+
+<a name="NewSourceInfo"></a>
+### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo
+
+Construct a `SourceInfo` object that can be used as the source for server-side copying operations like `CopyObject` and `ComposeObject`. This object can be used to set copy-conditions on the source.
+
+__Parameters__
+
+| Param | Type | Description |
+| :--- | :--- | :--- |
+| `bucket` | _string_ | Name of the source bucket |
+| `object` | _string_ | Name of the source object |
+| `decryptSSEC` | _*minio.SSEInfo_ | Decryption info for the source object (`nil` if the source is not encrypted) |
+
+__Example__
+
+``` go
+// No decryption parameter.
+src := NewSourceInfo("bucket", "object", nil)
+
+// With decryption parameter.
+decKey := NewSSEInfo([]byte{1,2,3}, "")
+src := NewSourceInfo("bucket", "object", decKey)
+```
+
+<a name="NewDestinationInfo"></a>
+### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) DestinationInfo
+
+Construct a `DestinationInfo` object that can be used as the destination object for server-side copying operations like `CopyObject` and `ComposeObject`.
+
+__Parameters__
+
+| Param | Type | Description |
+| :--- | :--- | :--- |
+| `bucket` | _string_ | Name of the destination bucket |
+| `object` | _string_ | Name of the destination object |
+| `encryptSSEC` | _*minio.SSEInfo_ | Encryption info for the destination object (`nil` if no encryption is required) |
+| `userMeta` | _map[string]string_ | User metadata to be set on the destination. If nil and there is only one source, user metadata is copied from the source. |
+
+__Example__
+
+``` go
+// No encryption parameter.
+dst := NewDestinationInfo("bucket", "object", nil, nil)
+
+// With encryption parameter.
+encKey := NewSSEInfo([]byte{1,2,3}, "")
+dst := NewDestinationInfo("bucket", "object", encKey, nil)
+```
+
+
<a name="FPutObject"></a>
### FPutObject(bucketName, objectName, filePath, contentType string) (length int64, err error)
@@ -566,8 +680,6 @@ Uploads contents from a file to objectName.
FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
-In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
-
__Parameters__
@@ -771,7 +883,7 @@ if err != nil {
```
<a name="GetEncryptedObject"></a>
-### GetEncryptedObject(bucketName, objectName string, encryptMaterials minio.EncryptionMaterials) (io.Reader, error)
+### GetEncryptedObject(bucketName, objectName string, encryptMaterials minio.EncryptionMaterials) (io.ReadCloser, error)
Returns the decrypted stream of the object data based on the given encryption materials. Most of the common errors occur when reading the stream.
@@ -788,7 +900,7 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`stream` | _io.Reader_ | Returns the deciphered object reader. |
+|`stream` | _io.ReadCloser_ | Returns the deciphered object reader, caller should close after reading. |
|`err` | _error_ | Returns errors. |
@@ -810,11 +922,14 @@ if err != nil {
fmt.Println(err)
return
}
+defer object.Close()
+
localFile, err := os.Create("/tmp/local-file.jpg")
if err != nil {
fmt.Println(err)
return
}
+
if _, err = io.Copy(localFile, object); err != nil {
fmt.Println(err)
return
@@ -883,6 +998,26 @@ if err != nil {
}
```
+<a name="NewSSEInfo"></a>
+
+### NewSSEInfo(key []byte, algo string) SSEInfo
+
+Create a key object for use as encryption or decryption parameter in operations involving server-side-encryption with customer provided key (SSE-C).
+
+__Parameters__
+
+| Param | Type | Description |
+| :--- | :--- | :--- |
+| `key` | _[]byte_ | Byte-slice of the raw, un-encoded binary key |
+| `algo` | _string_ | Algorithm to use in encryption or decryption with the given key. Can be empty (defaults to `AES256`) |
+
+__Example__
+
+``` go
+// Key for use in encryption/decryption
+keyInfo := NewSSEInfo([]byte{1,2,3}, "")
+```
+
## 5. Presigned operations
<a name="PresignedGetObject"></a>
@@ -1240,7 +1375,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`chan NotificationInfo` | _chan_ | Read channel for all notificatons on bucket |
+|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket |
|`NotificationInfo` | _object_ | Notification object represents events info |
|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation |
diff --git a/vendor/github.com/minio/minio-go/examples/s3/composeobject.go b/vendor/github.com/minio/minio-go/examples/s3/composeobject.go
new file mode 100644
index 000000000..555d98bc3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/composeobject.go
@@ -0,0 +1,74 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ minio "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Enable trace.
+ // s3Client.TraceOn(os.Stderr)
+
+ // Prepare source decryption key (here we assume same key to
+ // decrypt all source objects.)
+ decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
+
+ // Source objects to concatenate. We also specify decryption
+ // key for each
+ src1 := minio.NewSourceInfo("bucket1", "object1", decKey)
+ src1.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+
+ src2 := minio.NewSourceInfo("bucket2", "object2", decKey)
+ src2.SetMatchETag("f8ef9c385918b653a31624deb84149d2")
+
+ src3 := minio.NewSourceInfo("bucket3", "object3", decKey)
+ src3.SetMatchETag("5918b653a31624deb84149d2f8ef9c38")
+
+ // Create slice of sources.
+ srcs := []minio.SourceInfo{src1, src2, src3}
+
+ // Prepare destination encryption key
+ encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
+
+ // Create destination info
+	dst := minio.NewDestinationInfo("bucket", "object", encKey, nil)
+ err = s3Client.ComposeObject(dst, srcs)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ log.Println("Composed object successfully.")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
index a9ec78fee..0de865555 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
@@ -42,24 +42,28 @@ func main() {
// Enable trace.
// s3Client.TraceOn(os.Stderr)
+ // Source object
+ src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+
// All following conditions are allowed and can be combined together.
- // Set copy conditions.
- var copyConds = minio.CopyConditions{}
// Set modified condition, copy object modified since 2014 April.
- copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// Set unmodified condition, copy object unmodified since 2014 April.
- // copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ // src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// Set matching ETag condition, copy object which matches the following ETag.
- // copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+ // src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
// Set matching ETag except condition, copy object which does not match the following ETag.
- // copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+ // src.SetMatchETagExceptCond("31624deb84149d2f8ef9c385918b653a")
+
+ // Destination object
+	dst := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
// Initiate copy object.
- err = s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+ err = s3Client.CopyObject(dst, src)
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
index e997140be..8f51f26ae 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
@@ -24,6 +24,7 @@ import (
"os"
"github.com/minio/minio-go"
+ "github.com/minio/minio-go/pkg/encrypt"
)
func main() {
@@ -59,10 +60,10 @@ func main() {
////
// Build a symmetric key
- symmetricKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))
+ symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
// Build encryption materials which will encrypt uploaded data
- cbcMaterials, err := minio.NewCBCSecureMaterials(symmetricKey)
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
if err != nil {
log.Fatalln(err)
}
@@ -72,6 +73,7 @@ func main() {
if err != nil {
log.Fatalln(err)
}
+ defer reader.Close()
// Local file which holds plain data
localFile, err := os.Create("my-testfile")
diff --git a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
index f03f82147..b8f7e12f2 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
@@ -23,6 +23,7 @@ import (
"os"
"github.com/minio/minio-go"
+ "github.com/minio/minio-go/pkg/encrypt"
)
func main() {
@@ -65,10 +66,10 @@ func main() {
////
// Build a symmetric key
- symmetricKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))
+ symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
// Build encryption materials which will encrypt uploaded data
- cbcMaterials, err := minio.NewCBCSecureMaterials(symmetricKey)
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go
new file mode 100644
index 000000000..92e6a4840
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go
@@ -0,0 +1,87 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "io/ioutil"
+ "log"
+ "net/http"
+
+ minio "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ minioClient, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ content := bytes.NewReader([]byte("Hello again"))
+ key := []byte("32byteslongsecretkeymustprovided")
+ h := md5.New()
+ h.Write(key)
+ encryptionKey := base64.StdEncoding.EncodeToString(key)
+ encryptionKeyMD5 := base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ // Amazon S3 does not store the encryption key you provide.
+ // Instead S3 stores a randomly salted HMAC value of the
+ // encryption key in order to validate future requests.
+ // The salted HMAC value cannot be used to derive the value
+ // of the encryption key or to decrypt the contents of the
+ // encrypted object. That means, if you lose the encryption
+ // key, you lose the object.
+ var metadata = map[string][]string{
+ "x-amz-server-side-encryption-customer-algorithm": []string{"AES256"},
+ "x-amz-server-side-encryption-customer-key": []string{encryptionKey},
+ "x-amz-server-side-encryption-customer-key-MD5": []string{encryptionKeyMD5},
+ }
+
+ // minioClient.TraceOn(os.Stderr) // Enable to debug.
+ _, err = minioClient.PutObjectWithMetadata("mybucket", "my-encrypted-object.txt", content, metadata, nil)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ var reqHeaders = minio.RequestHeaders{Header: http.Header{}}
+ for k, v := range metadata {
+ reqHeaders.Set(k, v[0])
+ }
+ coreClient := minio.Core{minioClient}
+ reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", reqHeaders)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ decBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ if !bytes.Equal(decBytes, []byte("Hello again")) {
+		log.Fatalf("Expected \"Hello again\", got %s", string(decBytes))
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
index f668adf70..1179fd787 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
@@ -50,9 +50,8 @@ func main() {
log.Fatalln(err)
}
- // progress reader is notified as PutObject makes progress with
- // the read. For partial resume put object, progress reader is
- // appropriately advanced.
+ // Progress reader is notified as PutObject makes progress with
+ // the Reads inside.
progress := pb.New64(objectInfo.Size)
progress.Start()
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
new file mode 100644
index 000000000..6b0e57440
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
@@ -0,0 +1,89 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "fmt"
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, Chain's
+// Retrieve() will return an error collecting all the errors from all providers.
+//
+// If a Provider is found which returns a valid credentials Value, Chain
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvAWSS3{},
+// &credentials.EnvMinio{},
+// })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+// if err != nil {
+// log.Fatalln(err)
+// }
+//
+type Chain struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return New(&Chain{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ } // Success.
+ c.curr = p
+ return creds, nil
+ }
+ c.curr = nil
+ return Value{}, fmt.Errorf("No valid providers found %v", errs)
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *Chain) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go
new file mode 100644
index 000000000..cb5a6dda5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go
@@ -0,0 +1,137 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "errors"
+ "testing"
+)
+
+type testCredProvider struct {
+ creds Value
+ expired bool
+ err error
+}
+
+func (s *testCredProvider) Retrieve() (Value, error) {
+ s.expired = false
+ return s.creds, s.err
+}
+func (s *testCredProvider) IsExpired() bool {
+ return s.expired
+}
+
+func TestChainGet(t *testing.T) {
+ p := &Chain{
+ Providers: []Provider{
+ &credProvider{err: errors.New("FirstError")},
+ &credProvider{err: errors.New("SecondError")},
+ &testCredProvider{
+ creds: Value{
+ AccessKeyID: "AKIF",
+ SecretAccessKey: "NOSECRET",
+ SessionToken: "",
+ },
+ },
+ &credProvider{
+ creds: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ },
+ },
+ }
+
+ creds, err := p.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Also check credentials
+ if creds.AccessKeyID != "AKIF" {
+ t.Fatalf("Expected 'AKIF', got %s", creds.AccessKeyID)
+ }
+ if creds.SecretAccessKey != "NOSECRET" {
+ t.Fatalf("Expected 'NOSECRET', got %s", creds.SecretAccessKey)
+ }
+ if creds.SessionToken != "" {
+ t.Fatalf("Expected empty token, got %s", creds.SessionToken)
+ }
+}
+
+func TestChainIsExpired(t *testing.T) {
+ credProvider := &credProvider{expired: true}
+ p := &Chain{
+ Providers: []Provider{
+ credProvider,
+ },
+ }
+
+ if !p.IsExpired() {
+ t.Fatal("Expected expired to be true before any Retrieve")
+ }
+
+ _, err := p.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p.IsExpired() {
+ t.Fatal("Expected to be not expired after Retrieve")
+ }
+}
+
+func TestChainWithNoProvider(t *testing.T) {
+ p := &Chain{
+ Providers: []Provider{},
+ }
+ if !p.IsExpired() {
+ t.Fatal("Expected to be expired with no providers")
+ }
+ _, err := p.Retrieve()
+ if err != nil {
+ if err.Error() != "No valid providers found []" {
+ t.Error(err)
+ }
+ }
+}
+
+func TestChainProviderWithNoValidProvider(t *testing.T) {
+ errs := []error{
+ errors.New("FirstError"),
+ errors.New("SecondError"),
+ }
+ p := &Chain{
+ Providers: []Provider{
+ &credProvider{err: errs[0]},
+ &credProvider{err: errs[1]},
+ },
+ }
+
+ if !p.IsExpired() {
+ t.Fatal("Expected to be expired with no providers")
+ }
+
+ _, err := p.Retrieve()
+ if err != nil {
+ if err.Error() != "No valid providers found [FirstError SecondError]" {
+ t.Error(err)
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
new file mode 100644
index 000000000..130746f4b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
+{
+ "version": "8",
+ "hosts": {
+ "play": {
+ "url": "https://play.minio.io:9000",
+ "accessKey": "Q3AM3UQ867SPQQA43P2F",
+ "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ "api": "S3v2"
+ },
+ "s3": {
+ "url": "https://s3.amazonaws.com",
+ "accessKey": "accessKey",
+ "secretKey": "secret",
+ "api": "S3v4"
+ }
+ }
+} \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
new file mode 100644
index 000000000..cc3000532
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
@@ -0,0 +1,175 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Signature Type.
+ SignerType SignatureType
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what to
+// be expired means.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or was empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type IAMCredentialProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// Credentials - A container for concurrency-safe retrieval of credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronized state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ sync.Mutex
+
+ creds Value
+ forceRefresh bool
+ provider Provider
+}
+
+// New returns a pointer to a new Credentials with the provider set.
+func New(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be refreshed.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
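
The Provider/Expiry split above is designed for exactly one pattern: a provider embeds Expiry, calls SetExpiration inside Retrieve, and lets Credentials handle caching and refresh. A minimal sketch of a custom rotating provider (the static keys and one-hour lifetime stand in for a real token source such as STS):

```go
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go/pkg/credentials"
)

// rotatingProvider is a hypothetical Provider: it embeds Expiry and asks
// Credentials to refresh five minutes before its keys lapse.
type rotatingProvider struct {
	credentials.Expiry
}

func (p *rotatingProvider) Retrieve() (credentials.Value, error) {
	// Static keys and a one-hour lifetime stand in for a real token
	// source (STS, a vault, ...).
	p.SetExpiration(time.Now().Add(1*time.Hour), 5*time.Minute)

	return credentials.Value{
		AccessKeyID:     "TEMP-KEY",
		SecretAccessKey: "TEMP-SECRET",
		SessionToken:    "TEMP-TOKEN",
		SignerType:      credentials.SignatureV4,
	}, nil
}

func main() {
	creds := credentials.New(&rotatingProvider{})

	// The first Get always calls Retrieve; later Gets are served from
	// cache until the Expiry window opens.
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("access key:", v.AccessKeyID)
}
```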
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
new file mode 100644
index 000000000..7fc91d9d2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go
new file mode 100644
index 000000000..cbfb673b7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go
@@ -0,0 +1,73 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "errors"
+ "testing"
+)
+
+type credProvider struct {
+ creds Value
+ expired bool
+ err error
+}
+
+func (s *credProvider) Retrieve() (Value, error) {
+ s.expired = false
+ return s.creds, s.err
+}
+func (s *credProvider) IsExpired() bool {
+ return s.expired
+}
+
+func TestCredentialsGet(t *testing.T) {
+ c := New(&credProvider{
+ creds: Value{
+ AccessKeyID: "UXHW",
+ SecretAccessKey: "MYSECRET",
+ SessionToken: "",
+ },
+ expired: true,
+ })
+
+ creds, err := c.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if "UXHW" != creds.AccessKeyID {
+ t.Errorf("Expected \"UXHW\", got %s", creds.AccessKeyID)
+ }
+ if "MYSECRET" != creds.SecretAccessKey {
+ t.Errorf("Expected \"MYSECRET\", got %s", creds.SecretAccessKey)
+ }
+ if creds.SessionToken != "" {
+ t.Errorf("Expected session token to be empty, got %s", creds.SessionToken)
+ }
+}
+
+func TestCredentialsGetWithError(t *testing.T) {
+ c := New(&credProvider{err: errors.New("Custom error")})
+
+ _, err := c.Get()
+ if err != nil {
+ if err.Error() != "Custom error" {
+ t.Errorf("Expected \"Custom error\", got %s", err.Error())
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
new file mode 100644
index 000000000..fa1908aeb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
@@ -0,0 +1,45 @@
+// Package credentials provides credential retrieval and management
+// for S3 compatible object storage.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() again to get a new
+// credential Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewFromEnv()
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewFromIAM("")
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
new file mode 100644
index 000000000..11934433c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+ retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+ return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+ return !e.retrieved
+}
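
A short sketch tying the environment providers to a client, following the chaining pattern from the Chain documentation above (endpoint and region are placeholders; providers are tried in order, and with unset variables these env providers fall back to anonymous credentials rather than erroring):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Providers are tried in order; the first Retrieve that returns
	// without error wins and is cached.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},
		&credentials.EnvMinio{},
	})

	s3Client, err := minio.NewWithCredentials("play.minio.io:9000", creds, true, "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}

	buckets, err := s3Client.ListBuckets()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("buckets:", buckets)
}
```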
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
new file mode 100644
index 000000000..791087ef5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+type EnvMinio struct {
+ retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+ return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("MINIO_ACCESS_KEY")
+ secret := os.Getenv("MINIO_SECRET_KEY")
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go
new file mode 100644
index 000000000..2f72bea40
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go
@@ -0,0 +1,105 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "reflect"
+ "testing"
+)
+
+func TestEnvAWSRetrieve(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_SESSION_TOKEN", "token")
+
+ e := EnvAWS{}
+ if !e.IsExpired() {
+ t.Error("Expect creds to be expired before retrieve.")
+ }
+
+ creds, err := e.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedCreds := Value{
+ AccessKeyID: "access",
+ SecretAccessKey: "secret",
+ SessionToken: "token",
+ SignerType: SignatureV4,
+ }
+ if !reflect.DeepEqual(creds, expectedCreds) {
+ t.Errorf("Expected %v, got %v", expectedCreds, creds)
+ }
+
+ if e.IsExpired() {
+ t.Error("Expect creds to not be expired after retrieve.")
+ }
+
+ os.Clearenv()
+ os.Setenv("AWS_ACCESS_KEY", "access")
+ os.Setenv("AWS_SECRET_KEY", "secret")
+
+ expectedCreds = Value{
+ AccessKeyID: "access",
+ SecretAccessKey: "secret",
+ SignerType: SignatureV4,
+ }
+
+ creds, err = e.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(creds, expectedCreds) {
+ t.Errorf("Expected %v, got %v", expectedCreds, creds)
+ }
+
+}
+
+func TestEnvMinioRetrieve(t *testing.T) {
+ os.Clearenv()
+
+ os.Setenv("MINIO_ACCESS_KEY", "access")
+ os.Setenv("MINIO_SECRET_KEY", "secret")
+
+ e := EnvMinio{}
+ if !e.IsExpired() {
+ t.Error("Expect creds to be expired before retrieve.")
+ }
+
+ creds, err := e.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedCreds := Value{
+ AccessKeyID: "access",
+ SecretAccessKey: "secret",
+ SignerType: SignatureV4,
+ }
+ if !reflect.DeepEqual(creds, expectedCreds) {
+ t.Errorf("Expected %v, got %v", expectedCreds, creds)
+ }
+
+ if e.IsExpired() {
+ t.Error("Expect creds to not be expired after retrieve.")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 000000000..1be621385
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,120 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/go-ini/ini"
+ homedir "github.com/minio/go-homedir"
+)
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+ // Path to the shared credentials file.
+ //
+ // If empty, will look for the "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is also empty, will default to the current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename string, profile string) *Credentials {
+ return New(&FileAWSCredentials{
+ filename: filename,
+ profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+ if p.filename == "" {
+ p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+ if p.filename == "" {
+ homeDir, err := homedir.Dir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+ }
+ if p.profile == "" {
+ p.profile = os.Getenv("AWS_PROFILE")
+ if p.profile == "" {
+ p.profile = "default"
+ }
+ }
+
+ p.retrieved = false
+
+ iniProfile, err := loadProfile(p.filename, p.profile)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Default to empty string if not found.
+ id := iniProfile.Key("aws_access_key_id")
+ // Default to empty string if not found.
+ secret := iniProfile.Key("aws_secret_access_key")
+ // Default to empty string if not found.
+ token := iniProfile.Key("aws_session_token")
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileAWSCredentials) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads the named profile from the shared credentials file.
+// The credentials retrieved from the profile are returned, or an error if
+// the file cannot be read or the data is invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return nil, err
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return nil, err
+ }
+ return iniProfile, nil
+}
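For reference, a shared credentials file of the shape loadProfile expects, with INI section names acting as profiles; the values are hypothetical, mirroring the credentials.sample fixture exercised by the tests below.

    [default]
    aws_access_key_id     = accessKey
    aws_secret_access_key = secret
    aws_session_token     = token

    [no_token]
    aws_access_key_id     = accessKey
    aws_secret_access_key = secret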
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
new file mode 100644
index 000000000..9e26dd302
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
@@ -0,0 +1,129 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ homedir "github.com/minio/go-homedir"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+ // Path to the shared credentials file.
+ //
+ // If empty, will default to the current user's home directory:
+ // Linux/OSX: "$HOME/.mc/config.json"
+ // Windows: "%USERPROFILE%\mc\config.json"
+ filename string
+
+ // Minio Alias to extract credentials from the shared credentials file. If empty,
+ // will default to the environment variable "MINIO_ALIAS", or to "s3" if the
+ // environment variable is also not set.
+ alias string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename string, alias string) *Credentials {
+ return New(&FileMinioClient{
+ filename: filename,
+ alias: alias,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+ if p.filename == "" {
+ homeDir, err := homedir.Dir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.filename = filepath.Join(homeDir, ".mc", "config.json")
+ if runtime.GOOS == "windows" {
+ p.filename = filepath.Join(homeDir, "mc", "config.json")
+ }
+ }
+
+ if p.alias == "" {
+ p.alias = os.Getenv("MINIO_ALIAS")
+ if p.alias == "" {
+ p.alias = "s3"
+ }
+ }
+
+ p.retrieved = false
+
+ hostCfg, err := loadAlias(p.filename, p.alias)
+ if err != nil {
+ return Value{}, err
+ }
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: hostCfg.AccessKey,
+ SecretAccessKey: hostCfg.SecretKey,
+ SignerType: parseSignatureType(hostCfg.API),
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileMinioClient) IsExpired() bool {
+ return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+ URL string `json:"url"`
+ AccessKey string `json:"accessKey"`
+ SecretKey string `json:"secretKey"`
+ API string `json:"api"`
+}
+
+// config config version.
+type config struct {
+ Version string `json:"version"`
+ Hosts map[string]hostConfig `json:"hosts"`
+}
+
+// loadAlias loads the named alias from the shared credentials file.
+// The credentials retrieved from the alias are returned, or an error if
+// the file cannot be read.
+func loadAlias(filename, alias string) (hostConfig, error) {
+ cfg := &config{}
+ configBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return hostConfig{}, err
+ }
+ if err = json.Unmarshal(configBytes, cfg); err != nil {
+ return hostConfig{}, err
+ }
+ return cfg.Hosts[alias], nil
+}
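Likewise, the config.json shape that loadAlias unmarshals via the config and hostConfig types above. The "play" entry mirrors the fixture values exercised by the tests below; the version number is illustrative.

    {
        "version": "8",
        "hosts": {
            "s3": {
                "url": "https://s3.amazonaws.com",
                "accessKey": "accessKey",
                "secretKey": "secret",
                "api": "S3v4"
            },
            "play": {
                "url": "https://play.minio.io:9000",
                "accessKey": "Q3AM3UQ867SPQQA43P2F",
                "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
                "api": "S3v2"
            }
        }
    }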
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go
new file mode 100644
index 000000000..c62c53365
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go
@@ -0,0 +1,189 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestFileAWS(t *testing.T) {
+ os.Clearenv()
+
+ creds := NewFileAWSCredentials("credentials.sample", "")
+ credValues, err := creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "accessKey" {
+ t.Errorf("Expected 'accessKey', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "secret" {
+ t.Errorf("Expected 'secret', got '%s'", credValues.SecretAccessKey)
+ }
+ if credValues.SessionToken != "token" {
+ t.Errorf("Expected 'token', got '%s'", credValues.SessionToken)
+ }
+
+ os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "credentials.sample")
+ creds = NewFileAWSCredentials("", "")
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "accessKey" {
+ t.Errorf("Expected 'accessKey', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "secret" {
+ t.Errorf("Expected 'secret', got '%s'", credValues.SecretAccessKey)
+ }
+ if credValues.SessionToken != "token" {
+ t.Errorf("Expected 'token', got '%s'", credValues.SessionToken)
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "credentials.sample"))
+ creds = NewFileAWSCredentials("", "")
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "accessKey" {
+ t.Errorf("Expected 'accessKey', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "secret" {
+ t.Errorf("Expected 'secret', got '%s'", credValues.SecretAccessKey)
+ }
+ if credValues.SessionToken != "token" {
+ t.Errorf("Expected 'token', got '%s'", credValues.SessionToken)
+ }
+
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "no_token")
+
+ creds = NewFileAWSCredentials("credentials.sample", "")
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "accessKey" {
+ t.Errorf("Expected 'accessKey', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "secret" {
+ t.Errorf("Expected 'secret', got '%s'", credValues.SecretAccessKey)
+ }
+
+ os.Clearenv()
+
+ creds = NewFileAWSCredentials("credentials.sample", "no_token")
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "accessKey" {
+ t.Errorf("Expected 'accessKey', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "secret" {
+ t.Errorf("Expected 'secret', got '%s'", credValues.SecretAccessKey)
+ }
+
+ creds = NewFileAWSCredentials("credentials-non-existent.sample", "no_token")
+ _, err = creds.Get()
+ if !os.IsNotExist(err) {
+ t.Errorf("Expected open non-existent.json: no such file or directory, got %s", err)
+ }
+ if !creds.IsExpired() {
+ t.Error("Should be expired if not loaded")
+ }
+}
+
+func TestFileMinioClient(t *testing.T) {
+ os.Clearenv()
+
+ creds := NewFileMinioClient("config.json.sample", "")
+ credValues, err := creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "accessKey" {
+ t.Errorf("Expected 'accessKey', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "secret" {
+ t.Errorf("Expected 'secret', got '%s'", credValues.SecretAccessKey)
+ }
+ if credValues.SignerType != SignatureV4 {
+ t.Errorf("Expected 'S3v4', got '%s'", credValues.SignerType)
+ }
+
+ os.Clearenv()
+ os.Setenv("MINIO_ALIAS", "play")
+
+ creds = NewFileMinioClient("config.json.sample", "")
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "Q3AM3UQ867SPQQA43P2F" {
+ t.Errorf("Expected 'Q3AM3UQ867SPQQA43P2F', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" {
+ t.Errorf("Expected 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG', got '%s'", credValues.SecretAccessKey)
+ }
+ if credValues.SignerType != SignatureV2 {
+ t.Errorf("Expected 'S3v2', got '%s'", credValues.SignerType)
+ }
+
+ os.Clearenv()
+
+ creds = NewFileMinioClient("config.json.sample", "play")
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if credValues.AccessKeyID != "Q3AM3UQ867SPQQA43P2F" {
+ t.Errorf("Expected 'Q3AM3UQ867SPQQA43P2F', got '%s'", credValues.AccessKeyID)
+ }
+ if credValues.SecretAccessKey != "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" {
+ t.Errorf("Expected 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG', got '%s'", credValues.SecretAccessKey)
+ }
+ if credValues.SignerType != SignatureV2 {
+ t.Errorf("Expected 'S3v2', got '%s'", credValues.SignerType)
+ }
+
+ creds = NewFileMinioClient("non-existent.json", "play")
+ _, err = creds.Get()
+ if !os.IsNotExist(err) {
+ t.Errorf("Expected open non-existent.json: no such file or directory, got %s", err)
+ }
+ if !creds.IsExpired() {
+ t.Error("Should be expired if not loaded")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
new file mode 100644
index 000000000..b862cf538
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
@@ -0,0 +1,227 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "path"
+ "time"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow will allow the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so race conditions with expiring credentials do not cause
+// requests to fail unexpectedly due to ExpiredTokenException exceptions.
+const DefaultExpiryWindow = time.Second * 10 // 10 secs
+
+// An IAM retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+type IAM struct {
+ Expiry
+
+ // Required http Client to use when connecting to IAM metadata service.
+ Client *http.Client
+
+ // Custom endpoint to fetch IAM role credentials.
+ endpoint string
+}
+
+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from Go 1.8 onwards (https://github.com/golang/go/issues/4800).
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) == 0 {
+ return nil
+ }
+ for key, val := range via[0].Header {
+ req.Header[key] = val
+ }
+ return nil
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+ defaultIAMRoleEndpoint = "http://169.254.169.254"
+ defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM
+// provider. A custom endpoint may be supplied for testing; if empty, the
+// default EC2 instance metadata endpoint is used.
+func NewIAM(endpoint string) *Credentials {
+ if endpoint == "" {
+ endpoint = defaultIAMRoleEndpoint
+ }
+ p := &IAM{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ CheckRedirect: redirectHeaders,
+ },
+ endpoint: endpoint,
+ }
+ return New(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error is returned if the request fails or the desired
+// credentials cannot be extracted from the response.
+func (m *IAM) Retrieve() (Value, error) {
+ roleCreds, err := getCredentials(m.Client, m.endpoint)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Expiry window is set to 10 seconds.
+ m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+
+ // Unused params.
+ LastUpdated time.Time
+ Type string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+ if endpoint == "" {
+ endpoint = defaultIAMRoleEndpoint
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = defaultIAMSecurityCredsPath
+ return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if there are
+// no credentials, or if making or receiving the request fails.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(resp.Status)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Body)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return credsList, nil
+}
+
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
+//
+// If the credentials cannot be found, or there is an error
+// reading the response an error will be returned.
+func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ u, err := getIAMRoleURL(endpoint)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ roleNames, err := listRoleNames(client, u)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if len(roleNames) == 0 {
+ return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // - An instance profile can contain only one IAM role. This limit cannot be increased.
+ roleName := roleNames[0]
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // The following command retrieves the security credentials for an
+ // IAM role named `s3access`.
+ //
+ // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+ //
+ u.Path = path.Join(u.Path, roleName)
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
+ }
+
+ return respCreds, nil
+}
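A sketch of consuming the IAM provider. This only succeeds on an EC2 instance with an attached role, or against a stub endpoint like the test server below; Get() transparently re-runs Retrieve once the expiry window set above elapses.

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/pkg/credentials"
    )

    func main() {
        // An empty endpoint falls back to http://169.254.169.254,
        // the EC2 instance metadata service.
        creds := credentials.NewIAM("")

        v, err := creds.Get()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("access key:", v.AccessKeyID)
        fmt.Println("expired?", creds.IsExpired())
    }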
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go
new file mode 100644
index 000000000..3e5ad3ec0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go
@@ -0,0 +1,180 @@
+package credentials
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+const credsRespTmpl = `{
+ "Code": "Success",
+ "Type": "AWS-HMAC",
+ "AccessKeyId" : "accessKey",
+ "SecretAccessKey" : "secret",
+ "Token" : "token",
+ "Expiration" : "%s",
+ "LastUpdated" : "2009-11-23T0:00:00Z"
+}`
+
+const credsFailRespTmpl = `{
+ "Code": "ErrorCode",
+ "Message": "ErrorMsg",
+ "LastUpdated": "2009-11-23T0:00:00Z"
+}`
+
+func initTestFailServer() *httptest.Server {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "Not allowed", http.StatusBadRequest)
+ }))
+ return server
+}
+
+func initTestServerNoRoles() *httptest.Server {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(""))
+ }))
+ return server
+}
+
+func initTestServer(expireOn string, failAssume bool) *httptest.Server {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
+ fmt.Fprintln(w, "RoleName")
+ } else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
+ if failAssume {
+ fmt.Fprint(w, credsFailRespTmpl)
+ } else {
+ fmt.Fprintf(w, credsRespTmpl, expireOn)
+ }
+ } else {
+ http.Error(w, "bad request", http.StatusBadRequest)
+ }
+ }))
+
+ return server
+}
+
+func TestIAMMalformedEndpoint(t *testing.T) {
+ creds := NewIAM("%%%%")
+ _, err := creds.Get()
+ if err == nil {
+ t.Fatal("Unexpected should fail here")
+ }
+ if err.Error() != `parse %%%%: invalid URL escape "%%%"` {
+ t.Fatalf("Expected parse %%%%%%%%: invalid URL escape \"%%%%%%\", got %s", err)
+ }
+}
+
+func TestIAMFailServer(t *testing.T) {
+ server := initTestFailServer()
+ defer server.Close()
+
+ creds := NewIAM(server.URL)
+
+ _, err := creds.Get()
+ if err == nil {
+ t.Fatal("Unexpected should fail here")
+ }
+ if err.Error() != "400 Bad Request" {
+ t.Fatalf("Expected '400 Bad Request', got %s", err)
+ }
+}
+
+func TestIAMNoRoles(t *testing.T) {
+ server := initTestServerNoRoles()
+ defer server.Close()
+
+ creds := NewIAM(server.URL)
+ _, err := creds.Get()
+ if err == nil {
+ t.Fatal("Unexpected should fail here")
+ }
+ if err.Error() != "No IAM roles attached to this EC2 service" {
+ t.Fatalf("Expected 'No IAM roles attached to this EC2 service', got %s", err)
+ }
+}
+
+func TestIAM(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", false)
+ defer server.Close()
+
+ p := &IAM{
+ Client: http.DefaultClient,
+ endpoint: server.URL,
+ }
+
+ creds, err := p.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if "accessKey" != creds.AccessKeyID {
+ t.Errorf("Expected \"accessKey\", got %s", creds.AccessKeyID)
+ }
+
+ if "secret" != creds.SecretAccessKey {
+ t.Errorf("Expected \"secret\", got %s", creds.SecretAccessKey)
+ }
+
+ if "token" != creds.SessionToken {
+ t.Errorf("Expected \"token\", got %s", creds.SessionToken)
+ }
+
+ if !p.IsExpired() {
+ t.Error("Expected creds to be expired.")
+ }
+}
+
+func TestIAMFailAssume(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", true)
+ defer server.Close()
+
+ p := &IAM{
+ Client: http.DefaultClient,
+ endpoint: server.URL,
+ }
+
+ _, err := p.Retrieve()
+ if err == nil {
+ t.Fatal("Unexpected success, should fail")
+ }
+ if err.Error() != "ErrorMsg" {
+ t.Errorf("Expected \"ErrorMsg\", got %s", err)
+ }
+}
+
+func TestIAMIsExpired(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", false)
+ defer server.Close()
+
+ p := &IAM{
+ Client: http.DefaultClient,
+ endpoint: server.URL,
+ }
+ p.CurrentTime = func() time.Time {
+ return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
+ }
+
+ if !p.IsExpired() {
+ t.Error("Expected creds to be expired before retrieve.")
+ }
+
+ _, err := p.Retrieve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p.IsExpired() {
+ t.Error("Expected creds to not be expired after retrieve.")
+ }
+
+ p.CurrentTime = func() time.Time {
+ return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
+ }
+
+ if !p.IsExpired() {
+ t.Error("Expected creds to be expired when curren time has changed")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
new file mode 100644
index 000000000..c64ad6c23
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
@@ -0,0 +1,76 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+ // SignatureDefault is always set to v4.
+ SignatureDefault SignatureType = iota
+ SignatureV4
+ SignatureV2
+ SignatureV4Streaming
+ SignatureAnonymous // Anonymous signature signifies, no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+ return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+ return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+ return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+ return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type;
+// the strings returned here are case insensitive.
+func (s SignatureType) String() string {
+ if s.IsV2() {
+ return "S3v2"
+ } else if s.IsV4() {
+ return "S3v4"
+ } else if s.IsStreamingV4() {
+ return "S3v4Streaming"
+ }
+ return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+ if strings.EqualFold(str, "S3v4") {
+ return SignatureV4
+ } else if strings.EqualFold(str, "S3v2") {
+ return SignatureV2
+ } else if strings.EqualFold(str, "S3v4Streaming") {
+ return SignatureV4Streaming
+ }
+ return SignatureAnonymous
+}
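Because parseSignatureType is unexported, a round-trip check has to live inside the package; a hypothetical test sketch follows. SignatureDefault is excluded since it stringifies to "S3v4" and therefore parses back as SignatureV4.

    func TestSignatureTypeRoundTrip(t *testing.T) {
        for _, s := range []SignatureType{
            SignatureV2, SignatureV4, SignatureV4Streaming, SignatureAnonymous,
        } {
            if got := parseSignatureType(s.String()); got != s {
                t.Errorf("round trip failed: %v -> %q -> %v", s, s.String(), got)
            }
        }
    }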
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
new file mode 100644
index 000000000..25aff5696
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+ Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider, with the signature
+// set to v2. If access and secret are not specified, the returned
+// Value is anonymous regardless of the signature type that was set.
+func NewStaticV2(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2 with similar considerations.
+func NewStaticV4(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+ return New(&Static{
+ Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ SignerType: signerType,
+ },
+ })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ // Anonymous is not an error
+ return Value{SignerType: SignatureAnonymous}, nil
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+ return false
+}
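A short sketch of the anonymous fallback described above: empty keys degrade to an unsigned Value no matter which signature type was requested. The keys shown are placeholders.

    package main

    import (
        "fmt"
        "log"

        "github.com/minio/minio-go/pkg/credentials"
    )

    func main() {
        // Fully specified static keys are returned verbatim.
        signed := credentials.NewStaticV4("PLACEHOLDER-ACCESS", "PLACEHOLDER-SECRET", "")
        v, err := signed.Get()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v.SignerType) // S3v4

        // Missing keys yield an anonymous (unsigned) Value.
        anon := credentials.NewStaticV4("", "", "")
        v, _ = anon.Get()
        fmt.Println(v.SignerType.IsAnonymous()) // true
    }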
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go
new file mode 100644
index 000000000..491b1554b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go
@@ -0,0 +1,68 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "testing"
+
+func TestStaticGet(t *testing.T) {
+ creds := NewStatic("UXHW", "SECRET", "", SignatureV4)
+ credValues, err := creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if "UXHW" != credValues.AccessKeyID {
+ t.Errorf("Expected access key ID to match \"UXHW\", got %s", credValues.AccessKeyID)
+ }
+ if "SECRET" != credValues.SecretAccessKey {
+ t.Errorf("Expected secret access key to match \"SECRET\", got %s", credValues.SecretAccessKey)
+ }
+
+ if credValues.SessionToken != "" {
+ t.Error("Expected session token to match")
+ }
+
+ if credValues.SignerType != SignatureV4 {
+ t.Errorf("Expected 'S3v4', got %s", credValues.SignerType)
+ }
+
+ if creds.IsExpired() {
+ t.Error("Static credentials should never expire")
+ }
+
+ creds = NewStatic("", "", "", SignatureDefault)
+ credValues, err = creds.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if "" != credValues.AccessKeyID {
+ t.Errorf("Expected access key ID to match empty string, got %s", credValues.AccessKeyID)
+ }
+ if "" != credValues.SecretAccessKey {
+ t.Errorf("Expected secret access key to match empty string, got %s", credValues.SecretAccessKey)
+ }
+
+ if !credValues.SignerType.IsAnonymous() {
+ t.Errorf("Expected 'Anonymous', got %s", credValues.SignerType)
+ }
+
+ if creds.IsExpired() {
+ t.Error("Static credentials should never expire")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
index 7670e68f4..be45e52f4 100644
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
@@ -89,6 +89,15 @@ func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
}
+// Close closes the internal wrapped stream, if it implements io.Closer.
+func (s *CBCSecureMaterials) Close() error {
+ closer, ok := s.stream.(io.Closer)
+ if ok {
+ return closer.Close()
+ }
+ return nil
+}
+
// SetupEncryptMode - tells CBC that we are going to encrypt data
func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
// Set mode to encrypt
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
index 2fd75033f..8b8554336 100644
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
@@ -25,6 +25,9 @@ import "io"
// Materials - provides generic interface to encrypt any stream of data.
type Materials interface {
+ // Closes the wrapped stream properly, initiated by the caller.
+ Close() error
+
// Returns encrypted/decrypted data, io.Reader compatible.
Read(b []byte) (int, error)
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
index 755fd1ac5..22059bb1d 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
@@ -21,6 +21,7 @@ import (
"encoding/hex"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"strconv"
"strings"
@@ -92,9 +93,12 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [
// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
-func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) {
+func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
// Set x-amz-content-sha256 header.
req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
req.Header.Set("Content-Encoding", streamingEncoding)
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
@@ -138,6 +142,7 @@ func (s *StreamingReader) setSeedSignature(req *http.Request) {
type StreamingReader struct {
accessKeyID string
secretAccessKey string
+ sessionToken string
region string
prevSignature string
seedSignature string
@@ -195,16 +200,21 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
-func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey,
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
region string, dataLen int64, reqTime time.Time) *http.Request {
// Set headers needed for streaming signature.
- prepareStreamingRequest(req, dataLen, reqTime)
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ }
stReader := &StreamingReader{
baseReadCloser: req.Body,
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
+ sessionToken: sessionToken,
region: region,
reqTime: reqTime,
chunkBuf: make([]byte, payloadChunkSize),
@@ -244,7 +254,18 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
s.chunkBufLen = 0
for {
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
- if err == nil || err == io.ErrUnexpectedEOF {
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
s.chunkBufLen += n1
s.bytesRead += int64(n1)
@@ -255,25 +276,26 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
s.signChunk(s.chunkBufLen)
break
}
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // bytes read from baseReader different than
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, io.ErrUnexpectedEOF
+ }
- } else if err == io.EOF {
- // No more data left in baseReader - last chunk.
- // Done reading the last chunk from baseReader.
- s.done = true
-
- // bytes read from baseReader different than
- // content length provided.
- if s.bytesRead != s.contentLen {
- return 0, io.ErrUnexpectedEOF
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(0)
+ break
}
-
- // Sign the chunk and write it to s.buf.
- s.signChunk(0)
- break
-
- } else {
return 0, err
}
+
}
}
return s.buf.Read(buf)
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go
index 084a0dbab..1f49f2234 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go
@@ -39,7 +39,7 @@ func TestGetSeedSignature(t *testing.T) {
t.Fatalf("Failed to parse time - %v", err)
}
- req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "us-east-1", int64(dataLen), reqTime)
+ req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", "us-east-1", int64(dataLen), reqTime)
actualSeedSignature := req.Body.(*StreamingReader).seedSignature
expectedSeedSignature := "007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
@@ -72,7 +72,7 @@ func TestSetStreamingAuthorization(t *testing.T) {
dataLen := int64(65 * 1024)
reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z")
- req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, location, dataLen, reqTime)
+ req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", location, dataLen, reqTime)
expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
@@ -96,7 +96,7 @@ func TestStreamingReader(t *testing.T) {
baseReader := ioutil.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), 65*1024)))
req.Body = baseReader
- req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, location, dataLen, reqTime)
+ req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", location, dataLen, reqTime)
b, err := ioutil.ReadAll(req.Body)
if err != nil {
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
index 245fb08c3..0d75dc162 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -206,7 +206,7 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -228,6 +228,10 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
query.Set("X-Amz-SignedHeaders", signedHeaders)
query.Set("X-Amz-Credential", credential)
+ // Set session token if available.
+ if sessionToken != "" {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
req.URL.RawQuery = query.Encode()
// Get canonical request.
@@ -260,7 +264,7 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -272,6 +276,11 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
// Set x-amz-date.
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ // Set session token if available.
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
// Get canonical request.
canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
index 6f5ba1895..85ff063df 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
@@ -28,12 +28,12 @@ func TestSignatureCalculation(t *testing.T) {
if err != nil {
t.Fatal("Error:", err)
}
- req = SignV4(*req, "", "", "us-east-1")
+ req = SignV4(*req, "", "", "", "us-east-1")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
- req = PreSignV4(*req, "", "", "us-east-1", 0)
+ req = PreSignV4(*req, "", "", "", "us-east-1", 0)
if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
@@ -48,12 +48,12 @@ func TestSignatureCalculation(t *testing.T) {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
- req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
+ req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
- req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
+ req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1", 0)
if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: normal credentials should have Signature query resource.")
}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
index a3b6ed845..9d6ac4d81 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -19,6 +19,7 @@ package s3utils
import (
"bytes"
"encoding/hex"
+ "errors"
"net"
"net/url"
"regexp"
@@ -84,10 +85,29 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonChinaEndpoint(endpointURL) {
return true
}
-
+ if IsAmazonGovCloudEndpoint(endpointURL) {
+ return true
+ }
return endpointURL.Host == "s3.amazonaws.com"
}
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+ IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
+}
+
// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
@@ -181,3 +201,74 @@ func EncodePath(pathName string) string {
}
return encodedPathname
}
+
+// We support '.' in bucket names, but we fall back to using path
+// style requests instead for such buckets.
+var (
+ validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`)
+ validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+ ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+)
+
+// Common checker for both stricter and basic validation.
+func checkBucketNameCommon(bucketName string, strict bool) (err error) {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket name cannot be empty")
+ }
+ if len(bucketName) < 3 {
+ return errors.New("Bucket name cannot be smaller than 3 characters")
+ }
+ if len(bucketName) > 63 {
+ return errors.New("Bucket name cannot be greater than 63 characters")
+ }
+ if ipAddress.MatchString(bucketName) {
+ return errors.New("Bucket name cannot be an ip address")
+ }
+ if strings.Contains(bucketName, "..") {
+ return errors.New("Bucket name contains invalid characters")
+ }
+ if strict {
+ if !validBucketNameStrict.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+ }
+ if !validBucketName.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+}
+
+// CheckValidBucketName - checks if we have a valid input bucket name.
+// This is the non-strict version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func CheckValidBucketName(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, false)
+}
+
+// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
+// This is a stricter version.
+func CheckValidBucketNameStrict(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, true)
+}
+
+// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectNamePrefix(objectName string) error {
+ if len(objectName) > 1024 {
+ return errors.New("Object name cannot be greater than 1024 characters")
+ }
+ if !utf8.ValidString(objectName) {
+ return errors.New("Object name with non UTF-8 strings are not supported")
+ }
+ return nil
+}
+
+// CheckValidObjectName - checks if we have a valid input object name.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return errors.New("Object name cannot be empty")
+ }
+ return CheckValidObjectNamePrefix(objectName)
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
index f790861cd..6be701d18 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
@@ -17,6 +17,7 @@
package s3utils
import (
+ "errors"
"net/url"
"testing"
)
@@ -282,3 +283,87 @@ func TestEncodePath(t *testing.T) {
}
}
}
+
+// Tests validate the bucket name validator.
+func TestIsValidBucketName(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ bucketName string
+ // Expected result.
+ err error
+ // Flag to indicate whether test should Pass.
+ shouldPass bool
+ }{
+ {".mybucket", errors.New("Bucket name contains invalid characters"), false},
+ {"$mybucket", errors.New("Bucket name contains invalid characters"), false},
+ {"mybucket-", errors.New("Bucket name contains invalid characters"), false},
+ {"my", errors.New("Bucket name cannot be smaller than 3 characters"), false},
+ {"", errors.New("Bucket name cannot be empty"), false},
+ {"my..bucket", errors.New("Bucket name contains invalid characters"), false},
+ {"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false},
+ {"my.bucket.com", nil, true},
+ {"my-bucket", nil, true},
+ {"123my-bucket", nil, true},
+ {"Mybucket", nil, true},
+ }
+
+ for i, testCase := range testCases {
+ err := CheckValidBucketName(testCase.bucketName)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+
+}
+
+// Tests validate the bucket name validator stricter.
+func TestIsValidBucketNameStrict(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ bucketName string
+ // Expected result.
+ err error
+ // Flag to indicate whether test should Pass.
+ shouldPass bool
+ }{
+ {".mybucket", errors.New("Bucket name contains invalid characters"), false},
+ {"$mybucket", errors.New("Bucket name contains invalid characters"), false},
+ {"mybucket-", errors.New("Bucket name contains invalid characters"), false},
+ {"my", errors.New("Bucket name cannot be smaller than 3 characters"), false},
+ {"", errors.New("Bucket name cannot be empty"), false},
+ {"my..bucket", errors.New("Bucket name contains invalid characters"), false},
+ {"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false},
+ {"Mybucket", errors.New("Bucket name contains invalid characters"), false},
+ {"my.bucket.com", nil, true},
+ {"my-bucket", nil, true},
+ {"123my-bucket", nil, true},
+ }
+
+ for i, testCase := range testCases {
+ err := CheckValidBucketNameStrict(testCase.bucketName)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+
+}
diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/request-headers.go
index fa23b2fe3..76c87202d 100644
--- a/vendor/github.com/minio/minio-go/request-headers.go
+++ b/vendor/github.com/minio/minio-go/request-headers.go
@@ -48,7 +48,7 @@ func (c RequestHeaders) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
- c.Set("If-Match", etag)
+ c.Set("If-Match", "\""+etag+"\"")
return nil
}
@@ -57,7 +57,7 @@ func (c RequestHeaders) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
- c.Set("If-None-Match", etag)
+ c.Set("If-None-Match", "\""+etag+"\"")
return nil
}
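The added quoting follows RFC 7232, which defines entity tags as quoted strings on the wire. A quick sketch of the effect, assuming NewGetReqHeaders is the constructor defined alongside these setters:

    package main

    import (
        "fmt"

        minio "github.com/minio/minio-go"
    )

    func main() {
        h := minio.NewGetReqHeaders() // constructor assumed from request-headers.go
        if err := h.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e"); err != nil {
            panic(err)
        }
        // Prints the ETag with the surrounding quotes included.
        fmt.Println(h.Get("If-Match"))
    }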
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
index d7fa5e038..c02f3f1fa 100644
--- a/vendor/github.com/minio/minio-go/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -33,6 +33,7 @@ var awsS3EndpointMap = map[string]string{
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
+ "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
}
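The endpoint map gains AWS GovCloud, so buckets in us-gov-west-1 resolve to their regional host rather than a default. A sketch of the lookup pattern this map serves, assuming the usual fall-back to s3.amazonaws.com for unmapped regions:

    endpoint, ok := awsS3EndpointMap["us-gov-west-1"]
    if !ok {
    	endpoint = "s3.amazonaws.com" // fall-back for unmapped regions
    }
    fmt.Println(endpoint) // s3-us-gov-west-1.amazonaws.com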
diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go
index 11b40a0f8..c5aff9bbc 100644
--- a/vendor/github.com/minio/minio-go/s3-error.go
+++ b/vendor/github.com/minio/minio-go/s3-error.go
@@ -25,7 +25,7 @@ var s3ErrorResponseMap = map[string]string{
"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
"InternalError": "We encountered an internal error, please try again.",
- "InvalidAccessKeyID": "The access key ID you provided does not exist in our records.",
+ "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
"InvalidBucketName": "The specified bucket is not valid.",
"InvalidDigest": "The Content-Md5 you specified is not valid.",
"InvalidRange": "The requested range is not satisfiable",
diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go
deleted file mode 100644
index f9a57c3f1..000000000
--- a/vendor/github.com/minio/minio-go/signature-type.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// SignatureType is type of Authorization requested for a given HTTP request.
-type SignatureType int
-
-// Different types of supported signatures - default is Latest i.e SignatureV4.
-const (
- Latest SignatureType = iota
- SignatureV4
- SignatureV2
- SignatureV4Streaming
-)
-
-var emptySHA256 = sum256(nil)
-
-// isV2 - is signature SignatureV2?
-func (s SignatureType) isV2() bool {
- return s == SignatureV2
-}
-
-// isV4 - is signature SignatureV4?
-func (s SignatureType) isV4() bool {
- return s == SignatureV4 || s == Latest
-}
-
-// isStreamingV4 - is signature SignatureV4Streaming?
-func (s SignatureType) isStreamingV4() bool {
- return s == SignatureV4Streaming
-}
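With signature-type.go removed, SignatureType drops out of the public API (its emptySHA256 helper reappears in utils.go below), and picking a signature version is left to the client constructors. A sketch, assuming the package's NewV2/NewV4 constructors and illustrative endpoint/credentials:

    // Explicit signature V2 client, replacing manual SignatureType selection.
    client, err := minio.NewV2("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
    	log.Fatalln(err)
    }
    _ = client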
diff --git a/vendor/github.com/minio/minio-go/tempfile.go b/vendor/github.com/minio/minio-go/tempfile.go
deleted file mode 100644
index 65c7b0da1..000000000
--- a/vendor/github.com/minio/minio-go/tempfile.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "io/ioutil"
- "os"
- "sync"
-)
-
-// tempFile - temporary file container.
-type tempFile struct {
- *os.File
- mutex *sync.Mutex
-}
-
-// newTempFile returns a new temporary file, once closed it automatically deletes itself.
-func newTempFile(prefix string) (*tempFile, error) {
- // use platform specific temp directory.
- file, err := ioutil.TempFile(os.TempDir(), prefix)
- if err != nil {
- return nil, err
- }
- return &tempFile{
- File: file,
- mutex: &sync.Mutex{},
- }, nil
-}
-
-// Close - closer wrapper to close and remove temporary file.
-func (t *tempFile) Close() error {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- if t.File != nil {
- // Close the file.
- if err := t.File.Close(); err != nil {
- return err
- }
- // Remove file.
- if err := os.Remove(t.File.Name()); err != nil {
- return err
- }
- t.File = nil
- }
- return nil
-}
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 93cd1712f..6f54639e0 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -28,7 +28,6 @@ import (
"regexp"
"strings"
"time"
- "unicode/utf8"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -110,6 +109,8 @@ func closeResponse(resp *http.Response) {
}
}
+var emptySHA256 = sum256(nil)
+
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
@@ -121,7 +122,7 @@ func isValidEndpointURL(endpointURL url.URL) error {
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
- if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+ if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
@@ -146,63 +147,6 @@ func isValidExpiry(expires time.Duration) error {
return nil
}
-// We support '.' with bucket names but we fallback to using path
-// style requests instead for such buckets.
-var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
-
-// Invalid bucket name with double dot.
-var invalidDotBucketName = regexp.MustCompile(`\.\.`)
-
-// isValidBucketName - verify bucket name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
-func isValidBucketName(bucketName string) error {
- if strings.TrimSpace(bucketName) == "" {
- return ErrInvalidBucketName("Bucket name cannot be empty.")
- }
- if len(bucketName) < 3 {
- return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
- }
- if len(bucketName) > 63 {
- return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
- }
- if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
- return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
- }
- if invalidDotBucketName.MatchString(bucketName) {
- return ErrInvalidBucketName("Bucket name cannot have successive periods.")
- }
- if !validBucketName.MatchString(bucketName) {
- return ErrInvalidBucketName("Bucket name contains invalid characters.")
- }
- return nil
-}
-
-// isValidObjectName - verify object name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
-func isValidObjectName(objectName string) error {
- if strings.TrimSpace(objectName) == "" {
- return ErrInvalidObjectName("Object name cannot be empty.")
- }
- if len(objectName) > 1024 {
- return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
- }
- if !utf8.ValidString(objectName) {
- return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
- }
- return nil
-}
-
-// isValidObjectPrefix - verify if object prefix is valid.
-func isValidObjectPrefix(objectPrefix string) error {
- if len(objectPrefix) > 1024 {
- return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
- }
- if !utf8.ValidString(objectPrefix) {
- return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
- }
- return nil
-}
-
// make a copy of http.Header
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
@@ -225,3 +169,46 @@ func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.
}
return filteredHeader
}
+
+// regCred matches the credential string in an HTTP Authorization header.
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches the hex signature string in an HTTP Authorization header.
+var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+ if !strings.HasPrefix(origAuth, signV4Algorithm) {
+ // Not a V4 header; return a fully redacted placeholder.
+ return "AWS **REDACTED**:**REDACTED**"
+ }
+
+ // Signature V4 authorization header.
+
+ // Strip out accessKeyID from:
+ // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+ newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+ // Strip out 256-bit signature from: Signature=<256-bit signature>
+ return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// getDefaultLocation returns the location for the input URL `u`; if a
+// region override is provided, it takes precedence over the URL.
+//
+// If no other case matches, the location falls back to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+ if regionOverride != "" {
+ return regionOverride
+ }
+ if s3utils.IsAmazonChinaEndpoint(u) {
+ return "cn-north-1"
+ }
+ if s3utils.IsAmazonGovCloudEndpoint(u) {
+ return "us-gov-west-1"
+ }
+ // Default the location to 'us-east-1'.
+ return "us-east-1"
+}
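getDefaultLocation codifies the fallback order for bucket locations: an explicit region override wins, then endpoint heuristics (China, GovCloud), then us-east-1 as the default. A sketch of the expected results, mirroring TestDefaultBucketLocation below:

    u := url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"}
    fmt.Println(getDefaultLocation(u, ""))          // us-gov-west-1
    fmt.Println(getDefaultLocation(u, "us-west-1")) // us-west-1 (override wins)
    fmt.Println(getDefaultLocation(url.URL{Host: "s3.amazonaws.com"}, "")) // us-east-1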
diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go
index 4e015c855..ba297112e 100644
--- a/vendor/github.com/minio/minio-go/utils_test.go
+++ b/vendor/github.com/minio/minio-go/utils_test.go
@@ -21,8 +21,35 @@ import (
"net/url"
"testing"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
+// Tests the signature-redacting function used to filter
+// the on-wire Authorization header.
+func TestRedactSignature(t *testing.T) {
+ testCases := []struct {
+ authValue string
+ expectedRedactedAuthValue string
+ }{
+ {
+ authValue: "AWS 1231313:888x000231==",
+ expectedRedactedAuthValue: "AWS **REDACTED**:**REDACTED**",
+ },
+ {
+ authValue: "AWS4-HMAC-SHA256 Credential=12312313/20170613/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=02131231312313213",
+ expectedRedactedAuthValue: "AWS4-HMAC-SHA256 Credential=**REDACTED**/20170613/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=**REDACTED**",
+ },
+ }
+
+ for i, testCase := range testCases {
+ redactedAuthValue := redactSignature(testCase.authValue)
+ if redactedAuthValue != testCase.expectedRedactedAuthValue {
+ t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedRedactedAuthValue, redactedAuthValue)
+ }
+ }
+}
+
// Tests filter header function by filtering out
// some custom header keys.
func TestFilterHeader(t *testing.T) {
@@ -57,9 +84,9 @@ func TestGetEndpointURL(t *testing.T) {
{"s3.cn-north-1.amazonaws.com.cn", false, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
{"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true},
{"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true},
+ {"s3.amazonaws.com:443", true, "https://s3.amazonaws.com:443", nil, true},
{"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false},
{"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false},
- {"s3.amazonaws.com:443", true, "", ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
{"storage.googleapis.com:4000", true, "", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
{"s3.aamzza.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-")), false},
{"", true, "", ErrInvalidArgument("Endpoint: does not follow ip address or domain name standards."), false},
@@ -99,14 +126,17 @@ func TestIsValidEndpointURL(t *testing.T) {
}{
{"", ErrInvalidArgument("Endpoint url cannot be empty."), false},
{"/", nil, true},
- {"https://s3.am1;4205;0cazonaws.com", nil, true},
+ {"https://s3.amazonaws.com", nil, true},
{"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"https://s3-us-gov-west-1.amazonaws.com", nil, true},
+ {"https://s3-fips-us-gov-west-1.amazonaws.com", nil, true},
{"https://s3.amazonaws.com/", nil, true},
{"https://storage.googleapis.com/", nil, true},
+ {"https://z3.amazonaws.com", nil, true},
+ {"https://mybalancer.us-east-1.elb.amazonaws.com", nil, true},
{"192.168.1.1", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
{"https://amazon.googleapis.com/", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
{"https://storage.googleapis.com/bucket/", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
- {"https://z3.amazonaws.com", ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
{"https://s3.amazonaws.com/bucket/object", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
}
@@ -138,6 +168,52 @@ func TestIsValidEndpointURL(t *testing.T) {
}
}
+func TestDefaultBucketLocation(t *testing.T) {
+ testCases := []struct {
+ endpointURL url.URL
+ regionOverride string
+ expectedLocation string
+ }{
+ // Region override is set, so the URL is ignored. - Test 1.
+ {
+ endpointURL: url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+ regionOverride: "us-west-1",
+ expectedLocation: "us-west-1",
+ },
+ // No region override; the URL-based preference is honored. - Test 2.
+ {
+ endpointURL: url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+ regionOverride: "",
+ expectedLocation: "us-gov-west-1",
+ },
+ // Region override is honored - Test 3.
+ {
+ endpointURL: url.URL{Host: "s3.amazonaws.com"},
+ regionOverride: "us-west-1",
+ expectedLocation: "us-west-1",
+ },
+ // China region is honored when no region override is provided. - Test 4.
+ {
+ endpointURL: url.URL{Host: "s3.cn-north-1.amazonaws.com.cn"},
+ regionOverride: "",
+ expectedLocation: "cn-north-1",
+ },
+ // No region override and no recognizable region in the URL. - Test 5.
+ {
+ endpointURL: url.URL{Host: "s3.amazonaws.com"},
+ regionOverride: "",
+ expectedLocation: "us-east-1",
+ },
+ }
+
+ for i, testCase := range testCases {
+ retLocation := getDefaultLocation(testCase.endpointURL, testCase.regionOverride)
+ if testCase.expectedLocation != retLocation {
+ t.Errorf("Test %d: Expected location %s, got %s", i+1, testCase.expectedLocation, retLocation)
+ }
+ }
+}
+
// Tests validate the expiry time validator.
func TestIsValidExpiry(t *testing.T) {
testCases := []struct {
@@ -184,19 +260,19 @@ func TestIsValidBucketName(t *testing.T) {
// Flag to indicate whether test should Pass.
shouldPass bool
}{
- {".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
- {"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
- {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters."), false},
- {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
- {"", ErrInvalidBucketName("Bucket name cannot be empty."), false},
- {"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
+ {".mybucket", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
+ {"mybucket.", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
+ {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
+ {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters"), false},
+ {"", ErrInvalidBucketName("Bucket name cannot be empty"), false},
+ {"my..bucket", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
{"my.bucket.com", nil, true},
{"my-bucket", nil, true},
{"123my-bucket", nil, true},
}
for i, testCase := range testCases {
- err := isValidBucketName(testCase.bucketName)
+ err := s3utils.CheckValidBucketName(testCase.bucketName)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
}