author    Christopher Speller <crspeller@gmail.com>  2017-04-24 20:11:36 -0400
committer Joram Wilander <jwawilander@gmail.com>     2017-04-24 20:11:36 -0400
commit    f5437632f486b7d0a0a181c58f113c86d032b02c (patch)
tree      407388e3003a210a89f4b2128d7ad656f8b79d26 /vendor/github.com/minio
parent    7f68a60f8c228d5604e0566bf84cabb145d16c37 (diff)
Upgrading server dependencies (#6215)
Diffstat (limited to 'vendor/github.com/minio')
-rw-r--r--  vendor/github.com/minio/minio-go/README.md  |   3
-rw-r--r--  vendor/github.com/minio/minio-go/api-error-response.go  |  10
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object.go  |  37
-rw-r--r--  vendor/github.com/minio/minio-go/api.go  |  43
-rw-r--r--  vendor/github.com/minio/minio-go/api_functional_v2_test.go  |   2
-rw-r--r--  vendor/github.com/minio/minio-go/api_functional_v4_test.go  | 153
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache.go  |   7
-rw-r--r--  vendor/github.com/minio/minio-go/copy-conditions.go  |  10
-rw-r--r--  vendor/github.com/minio/minio-go/docs/API.md  |  89
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/copyobject.go  |   2
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go  |  56
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3utils/utils.go  |  12
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/set/stringset.go  |   8
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/set/stringset_test.go  |  25
14 files changed, 295 insertions, 162 deletions
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index f0d880b1e..fb94f8010 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -1,4 +1,5 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
+
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
**Supported cloud storage providers:**
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
index bcfad3761..fee3c7d53 100644
--- a/vendor/github.com/minio/minio-go/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/api-error-response.go
@@ -149,6 +149,16 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
return errResp
}
+// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
+func ErrTransferAccelerationBucket(bucketName string) error {
+ msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
+ return ErrorResponse{
+ Code: "InvalidArgument",
+ Message: msg,
+ BucketName: bucketName,
+ }
+}
+
// ErrEntityTooLarge - Input size is larger than supported maximum.
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ā€˜%dā€™ exceeds the maximum allowed object size ā€˜%dā€™ for single PUT operation.", totalSize, maxObjectSize)
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index c9b4dcedd..48ee947ff 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -212,10 +212,12 @@ type Object struct {
reqCh chan<- getRequest
resCh <-chan getResponse
doneCh chan<- struct{}
- prevOffset int64
currOffset int64
objectInfo ObjectInfo
+ // Ask lower level to initiate data fetching based on currOffset
+ seekData bool
+
// Keeps track of closed call.
isClosed bool
@@ -258,6 +260,10 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
if response.Error != nil {
return response, response.Error
}
+
+ // Data are ready on the wire, no need to reinitiate connection in lower level
+ o.seekData = false
+
return response, nil
}
@@ -266,8 +272,6 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
func (o *Object) setOffset(bytesRead int64) error {
// Update the currentOffset.
o.currOffset += bytesRead
- // Save the current offset as previous offset.
- o.prevOffset = o.currOffset
if o.currOffset >= o.objectInfo.Size {
return io.EOF
@@ -303,22 +307,9 @@ func (o *Object) Read(b []byte) (n int, err error) {
readReq.isFirstReq = true
}
- // Verify if offset has changed and currOffset is greater than
- // previous offset. Perhaps due to Seek().
- offsetChange := o.prevOffset - o.currOffset
- if offsetChange < 0 {
- offsetChange = -offsetChange
- }
- if offsetChange > 0 {
- // Fetch the new reader at the current offset again.
- readReq.Offset = o.currOffset
- readReq.DidOffsetChange = true
- } else {
- // No offset changes no need to fetch new reader, continue
- // reading.
- readReq.DidOffsetChange = false
- readReq.Offset = 0
- }
+ // Ask to establish a new data fetch routine based on seekData flag
+ readReq.DidOffsetChange = o.seekData
+ readReq.Offset = o.currOffset
// Send and receive from the first request.
response, err := o.doGetRequest(readReq)
@@ -430,8 +421,6 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
if !o.objectInfoSet {
// Update the currentOffset.
o.currOffset += bytesRead
- // Save the current offset as previous offset.
- o.prevOffset = o.currOffset
} else {
// If this was not the first request update
// the offsets and compare against objectInfo
@@ -492,8 +481,6 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
return 0, err
}
}
- // Save current offset as previous offset.
- o.prevOffset = o.currOffset
// Switch through whence.
switch whence {
@@ -527,6 +514,10 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o.prevErr == io.EOF {
o.prevErr = nil
}
+
+ // Ask lower level to fetch again from source
+ o.seekData = true
+
// Return the effective offset.
return o.currOffset, nil
}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index 98829cd2c..a21c40e80 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -71,6 +71,9 @@ type Client struct {
isTraceEnabled bool
traceOutput io.Writer
+ // S3 specific accelerated endpoint.
+ s3AccelerateEndpoint string
+
// Random seed.
random *rand.Rand
}
@@ -78,7 +81,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "2.0.3"
+ libraryVersion = "2.0.4"
)
// User Agent should always following the below style.
@@ -206,8 +209,7 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
- // if app name and version is not set, we do not a new user
- // agent.
+ // if app name and version not set, we do not set a new user agent.
if appName != "" && appVersion != "" {
c.appInfo = struct {
appName string
@@ -258,8 +260,18 @@ func (c *Client) TraceOff() {
c.isTraceEnabled = false
}
-// requestMetadata - is container for all the values to make a
-// request.
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is only specific to S3 for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) {
+ c.s3AccelerateEndpoint = accelerateEndpoint
+ }
+}
+
+// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
// If set newRequest presigns the URL.
presignURL bool
@@ -601,10 +613,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Body = ioutil.NopCloser(metadata.contentBody)
}
- // FIXEM: Enable this when Google Cloud Storage properly supports 100-continue.
+ // FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
// Skip setting 'expect' header for Google Cloud Storage, there
// are some known issues - https://github.com/restic/restic/issues/520
- if !s3utils.IsGoogleEndpoint(c.endpointURL) {
+ if !s3utils.IsGoogleEndpoint(c.endpointURL) && c.s3AccelerateEndpoint == "" {
// Set 'Expect' header for the request.
req.Header.Set("Expect", "100-continue")
}
@@ -672,9 +684,22 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
if s3utils.IsAmazonEndpoint(c.endpointURL) {
- // Fetch new host based on the bucket location.
- host = getS3Endpoint(bucketLocation)
+ if c.s3AccelerateEndpoint != "" && bucketName != "" {
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ // Disable transfer acceleration for non-compliant bucket names.
+ if strings.Contains(bucketName, ".") {
+ return nil, ErrTransferAccelerationBucket(bucketName)
+ }
+ // If transfer acceleration is requested set new host.
+ // For more details about enabling transfer acceleration read here.
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ host = c.s3AccelerateEndpoint
+ } else {
+ // Fetch new host based on the bucket location.
+ host = getS3Endpoint(bucketLocation)
+ }
}
+
// Save scheme.
scheme := c.endpointURL.Scheme
diff --git a/vendor/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
index 23713732a..f41cc0ff4 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v2_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
@@ -943,7 +943,7 @@ func TestCopyObjectV2(t *testing.T) {
}
// Set copy conditions.
- copyConds := NewCopyConditions()
+ copyConds := CopyConditions{}
err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal("Error:", err)
diff --git a/vendor/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
index d19d3e1ae..426d2ddcc 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v4_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
@@ -1255,6 +1255,7 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
// Generate data more than 32K
buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
+ bufSize := len(buf)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -1263,10 +1264,21 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
t.Fatal("Error:", err, bucketName, objectName)
}
- if n != int64(len(buf)) {
+ if n != int64(bufSize) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
+ defer func() {
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ }()
+
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
@@ -1277,77 +1289,86 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
- if st.Size != int64(len(buf)) {
+ if st.Size != int64(bufSize) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
- offset := int64(2048)
- n, err = r.Seek(offset, 0)
- if err != nil {
- t.Fatal("Error:", err, offset)
- }
- if n != offset {
- t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
- offset, n)
- }
- n, err = r.Seek(0, 1)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != offset {
- t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
- offset, n)
- }
- _, err = r.Seek(offset, 2)
- if err == nil {
- t.Fatal("Error: seek on positive offset for whence '2' should error out")
- }
- n, err = r.Seek(-offset, 2)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != st.Size-offset {
- t.Fatalf("Error: number of bytes seeked back does not match, want %d, got %v\n", st.Size-offset, n)
- }
-
- var buffer1 bytes.Buffer
- if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
+ // This following function helps us to compare data from the reader after seek
+ // with the data from the original buffer
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
}
- }
- if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- // Seek again and read again.
- n, err = r.Seek(offset-1, 0)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != (offset - 1) {
- t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
- }
-
- var buffer2 bytes.Buffer
- if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
- }
- // Verify now lesser bytes.
- if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
+ // Generic seek error for errors other than io.EOF
+ seekErr := errors.New("seek error")
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, seekErr, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, seekErr, false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ // We expect an error
+ if testCase.err == seekErr && err == nil {
+ t.Fatalf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err)
+ }
+ // We expect a specific error
+ if testCase.err != seekErr && testCase.err != err {
+ t.Fatalf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err)
+ }
+ // If we expect an error go to the next loop
+ if testCase.err != nil {
+ continue
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+ t.Fatalf("Test %d, error: number of bytes seeked does not match, want %v, got %v\n", i+1,
+ testCase.pos, n)
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
}
}
@@ -1662,7 +1683,7 @@ func TestCopyObject(t *testing.T) {
}
// Set copy conditions.
- copyConds := NewCopyConditions()
+ copyConds := CopyConditions{}
// Start by setting wrong conditions
err = copyConds.SetModified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
@@ -1725,7 +1746,7 @@ func TestCopyObject(t *testing.T) {
}
// CopyObject again but with wrong conditions
- copyConds = NewCopyConditions()
+ copyConds = CopyConditions{}
err = copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal("Error:", err)
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 46dfe9348..c35e26b7c 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -164,13 +164,6 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Set get bucket location always as path style.
targetURL := c.endpointURL
-
- // Requesting a bucket location from an accelerate endpoint returns a 400,
- // so default to us-east-1 for the lookup
- if s3utils.IsAmazonS3AccelerateEndpoint(c.endpointURL) {
- targetURL.Host = getS3Endpoint("us-east-1")
- }
-
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
diff --git a/vendor/github.com/minio/minio-go/copy-conditions.go b/vendor/github.com/minio/minio-go/copy-conditions.go
index 5dcdfaef0..65018aa09 100644
--- a/vendor/github.com/minio/minio-go/copy-conditions.go
+++ b/vendor/github.com/minio/minio-go/copy-conditions.go
@@ -41,11 +41,13 @@ type CopyConditions struct {
conditions []copyCondition
}
-// NewCopyConditions - Instantiate new list of conditions.
+// NewCopyConditions - Instantiate new list of conditions. This
+// function is left behind for backward compatibility. The idiomatic
+// way to set an empty set of copy conditions is,
+// ``copyConditions := CopyConditions{}``.
+//
func NewCopyConditions() CopyConditions {
- return CopyConditions{
- conditions: make([]copyCondition, 0),
- }
+ return CopyConditions{}
}
// SetMatchETag - set match etag.
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
index 0365c7fad..dfb90b5f2 100644
--- a/vendor/github.com/minio/minio-go/docs/API.md
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -1,4 +1,4 @@
-# Golang Client API Reference [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Minio Go Client API Reference [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
## Initialize Minio Client object.
@@ -60,7 +60,7 @@ func main() {
|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) |
+|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
| | [`FPutObject`](#FPutObject) | | |
@@ -69,7 +69,7 @@ func main() {
## 1. Constructor
<a name="Minio"></a>
-### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*Client, error)
+### New(endpoint, accessKeyID, secretAccessKey string, ssl bool) (*Client, error)
Initializes a new client object.
__Parameters__
@@ -86,7 +86,7 @@ __Parameters__
## 2. Bucket operations
<a name="MakeBucket"></a>
-### MakeBucket(bucketName string, location string) error
+### MakeBucket(bucketName, location string) error
Creates a new bucket.
@@ -216,7 +216,7 @@ if err != nil {
```
<a name="ListObjects"></a>
-### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
Lists objects in a bucket.
@@ -267,7 +267,7 @@ for object := range objectCh {
<a name="ListObjectsV2"></a>
-### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
Lists objects in a bucket using the recommended listing API v2
@@ -317,7 +317,7 @@ for object := range objectCh {
```
<a name="ListIncompleteUploads"></a>
-### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
+### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
Lists partially uploaded objects in a bucket.
@@ -373,7 +373,7 @@ for multiPartObject := range multiPartObjectCh {
## 3. Object operations
<a name="GetObject"></a>
-### GetObject(bucketName string, objectName string) (*Object, error)
+### GetObject(bucketName, objectName string) (*Object, error)
Downloads an object.
@@ -392,7 +392,7 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`object` | _*minio.Object_ |_minio.Object_ represents object reader |
+|`object` | _*minio.Object_ |_minio.Object_ represents object reader. It implements io.Reader, io.Seeker, io.ReaderAt and io.Closer interfaces. |
__Example__
@@ -418,7 +418,7 @@ if _, err = io.Copy(localFile, object); err != nil {
```
<a name="FGetObject"></a>
-### FGetObject(bucketName string, objectName string, filePath string) error
+### FGetObject(bucketName, objectName, filePath string) error
Downloads and saves the object as a file in the local filesystem.
@@ -446,7 +446,7 @@ if err != nil {
```
<a name="PutObject"></a>
-### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
+### PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int, err error)
Uploads an object.
@@ -489,7 +489,7 @@ if err != nil {
<a name="CopyObject"></a>
-### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
+### CopyObject(bucketName, objectName, objectSource string, conditions CopyConditions) error
Copy a source object into a new object with the provided name in the provided bucket.
@@ -509,24 +509,34 @@ __Example__
```go
+// Use-case-1
+// To copy an existing object to a new object with _no_ copy conditions.
+copyConditions := minio.CopyConditions{}
+err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConditions)
+if err != nil {
+ fmt.Println(err)
+ return
+}
-// All following conditions are allowed and can be combined together.
+// Use-case-2
+// To copy an existing object to a new object with the following copy conditions
+// 1. that matches a given ETag
+// 2. and modified after 1st April 2014
+// 3. but unmodified since 23rd April 2014
-// Set copy conditions.
-var copyConds = minio.NewCopyConditions()
-// Set modified condition, copy object modified since 2014 April.
-copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+// Initialize empty copy conditions.
+var copyConds = minio.CopyConditions{}
-// Set unmodified condition, copy object unmodified since 2014 April.
-// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+// copy object that matches the given ETag.
+copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
-// Set matching ETag condition, copy object which matches the following ETag.
-// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+// and modified after 1st April 2014
+copyConds.SetModified(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
-// Set matching ETag except condition, copy object which does not match the following ETag.
-// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+// but unmodified since 23rd April 2014
+copyConds.SetUnmodified(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
-err := minioClient.CopyObject("mybucket", "myobject", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
if err != nil {
fmt.Println(err)
return
@@ -535,7 +545,7 @@ if err != nil {
```
<a name="FPutObject"></a>
-### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
+### FPutObject(bucketName, objectName, filePath, contentType string) error
Uploads contents from a file to objectName.
@@ -569,7 +579,7 @@ if err != nil {
```
<a name="StatObject"></a>
-### StatObject(bucketName string, objectName string) (ObjectInfo, error)
+### StatObject(bucketName, objectName string) (ObjectInfo, error)
Gets metadata of an object.
@@ -613,7 +623,7 @@ fmt.Println(objInfo)
```
<a name="RemoveObject"></a>
-### RemoveObject(bucketName string, objectName string) error
+### RemoveObject(bucketName, objectName string) error
Removes an object.
@@ -670,7 +680,7 @@ for e := range errorCh {
<a name="RemoveIncompleteUpload"></a>
-### RemoveIncompleteUpload(bucketName string, objectName string) error
+### RemoveIncompleteUpload(bucketName, objectName string) error
Removes a partially uploaded object.
@@ -699,7 +709,7 @@ if err != nil {
<a name="PresignedGetObject"></a>
-### PresignedGetObject(bucketName string, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
@@ -733,7 +743,7 @@ if err != nil {
```
<a name="PresignedPutObject"></a>
-### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
+### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error)
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
@@ -822,7 +832,7 @@ fmt.Printf("%s\n", url)
## 5. Bucket policy/notification operations
<a name="SetBucketPolicy"></a>
-### SetBucketPolicy(bucketname string, objectPrefix string, policy policy.BucketPolicy) error
+### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
Set access permissions on bucket or an object prefix.
@@ -864,7 +874,7 @@ if err != nil {
```
<a name="GetBucketPolicy"></a>
-### GetBucketPolicy(bucketName string, objectPrefix string) (policy.BucketPolicy, error)
+### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error)
Get access permissions on a bucket or a prefix.
@@ -901,7 +911,7 @@ fmt.Println("Access permissions for mybucket is", bucketPolicy)
```
<a name="ListBucketPolicies"></a>
-### ListBucketPolicies(bucketName string, objectPrefix string) (map[string]BucketPolicy, error)
+### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error)
Get access permissions rules associated to the specified bucket and prefix.
@@ -1099,7 +1109,7 @@ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET"
## 6. Client custom settings
<a name="SetAppInfo"></a>
-### SetAppInfo(appName string, appVersion string)
+### SetAppInfo(appName, appVersion string)
Adds application details to User-Agent.
__Parameters__
@@ -1148,6 +1158,17 @@ __Parameters__
### TraceOff()
Disables HTTP tracing.
+<a name="SetS3TransferAccelerate"></a>
+### SetS3TransferAccelerate(acceleratedEndpoint string)
+Set AWS S3 transfer acceleration endpoint for all API requests hereafter.
+NOTE: This API applies only to AWS S3 and ignored with other S3 compatible object storage services.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`acceleratedEndpoint` | _string_ | Set to new S3 transfer acceleration endpoint.|
+
## 7. Explore Further
diff --git a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
index 9f9e5bc4f..a9ec78fee 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
@@ -45,7 +45,7 @@ func main() {
// All following conditions are allowed and can be combined together.
// Set copy conditions.
- var copyConds = minio.NewCopyConditions()
+ var copyConds = minio.CopyConditions{}
// Set modified condition, copy object modified since 2014 April.
copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go
new file mode 100644
index 000000000..e47976f2e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go
@@ -0,0 +1,56 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Enable S3 transfer accelerate endpoint.
+ s3Client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
+
+ object, err := os.Open("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer object.Close()
+
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
index ae1cea337..a3b6ed845 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -85,10 +85,6 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
return true
}
- if IsAmazonS3AccelerateEndpoint(endpointURL) {
- return true
- }
-
return endpointURL.Host == "s3.amazonaws.com"
}
@@ -105,14 +101,6 @@ func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
}
-// IsAmazonS3AccelerateEndpoint - Match if it is an Amazon S3 Accelerate
-func IsAmazonS3AccelerateEndpoint(endpointURL url.URL) bool {
- if endpointURL == sentinelURL {
- return false
- }
- return endpointURL.Host == "s3-accelerate.amazonaws.com"
-}
-
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
index 55084d461..9f33488e0 100644
--- a/vendor/github.com/minio/minio-go/pkg/set/stringset.go
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
@@ -25,8 +25,8 @@ import (
// StringSet - uses map as set of strings.
type StringSet map[string]struct{}
-// keys - returns StringSet keys.
-func (set StringSet) keys() []string {
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
keys := make([]string, 0, len(set))
for k := range set {
keys = append(keys, k)
@@ -141,7 +141,7 @@ func (set StringSet) Union(sset StringSet) StringSet {
// MarshalJSON - converts to JSON data.
func (set StringSet) MarshalJSON() ([]byte, error) {
- return json.Marshal(set.keys())
+ return json.Marshal(set.ToSlice())
}
// UnmarshalJSON - parses JSON data and creates new set with it.
@@ -169,7 +169,7 @@ func (set *StringSet) UnmarshalJSON(data []byte) error {
// String - returns printable string of the set.
func (set StringSet) String() string {
- return fmt.Sprintf("%s", set.keys())
+ return fmt.Sprintf("%s", set.ToSlice())
}
// NewStringSet - creates new string set.
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
index 4b74e7065..e276fec5a 100644
--- a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
@@ -17,6 +17,7 @@
package set
import (
+ "fmt"
"strings"
"testing"
)
@@ -320,3 +321,27 @@ func TestStringSetString(t *testing.T) {
}
}
}
+
+// StringSet.ToSlice() is called with series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetToSlice(t *testing.T) {
+ testCases := []struct {
+ set StringSet
+ expectedResult string
+ }{
+ // Test empty set.
+ {NewStringSet(), `[]`},
+ // Test set with empty value.
+ {CreateStringSet(""), `[]`},
+ // Test set with value.
+ {CreateStringSet("foo"), `[foo]`},
+ // Test set with value.
+ {CreateStringSet("foo", "bar"), `[bar foo]`},
+ }
+
+ for _, testCase := range testCases {
+ sslice := testCase.set.ToSlice()
+ if str := fmt.Sprintf("%s", sslice); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}