Diffstat (limited to 'vendor/github.com/minio/minio-go/api-put-object-multipart.go')
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go  54
1 file changed, 23 insertions(+), 31 deletions(-)
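
The change below replaces the single contentType string threaded through the multipart helpers with a metaData map[string][]string that is copied into the request headers, with Content-Type defaulting to "application/octet-stream" when not supplied. A minimal standalone sketch of that header-building pattern follows; the buildHeaders helper name is hypothetical and not part of minio-go, it only mirrors the logic added to initiateMultipartUpload in this diff.

package main

import (
	"fmt"
	"net/http"
)

// buildHeaders copies the first value of each metadata key into an
// http.Header and falls back to "application/octet-stream" when no
// Content-Type is provided, mirroring the pattern introduced below.
func buildHeaders(metaData map[string][]string) http.Header {
	customHeader := make(http.Header)
	for k, v := range metaData {
		if len(v) > 0 {
			customHeader.Set(k, v[0])
		}
	}
	if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
		customHeader.Set("Content-Type", "application/octet-stream")
	}
	return customHeader
}

func main() {
	h := buildHeaders(map[string][]string{"X-Amz-Meta-Reviewer": {"alice"}})
	fmt.Println(h.Get("Content-Type"))        // application/octet-stream
	fmt.Println(h.Get("X-Amz-Meta-Reviewer")) // alice
}
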
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index cdd3f53c2..f74eae626 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -45,11 +45,11 @@ import (
// If we exhaust all the known types, code proceeds to use stream as
// is where each part is re-downloaded, checksummed and verified
// before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
- return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
+ return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
@@ -58,17 +58,17 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
- return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
+ return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
}
-// putObjectStream uploads files bigger than 5MiB, and also supports
+// putObjectStream uploads files bigger than 64MiB, and also supports
// special case where size is unknown i.e '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -83,27 +83,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all previously uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // getUploadID for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
- // If This session is a continuation of a previous session fetch all
- // previously uploaded parts info and as a special case only fetch partsInfo
- // for only known upload size.
- if !isNew {
- // Fetch previously uploaded parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
@@ -139,12 +124,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// as we read from the source.
reader = newHook(tmpBuffer, progress)
+ part, ok := partsInfo[partNumber]
+
// Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
+ if !ok || shouldUploadPart(objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
- }, partsInfo) {
+ }, uploadPartReq{PartNum: partNumber, Part: &part}) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
@@ -212,7 +199,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@@ -225,13 +212,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
urlValues := make(url.Values)
urlValues.Set("uploads", "")
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
// Set ContentType header.
customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+ // Set a default content-type header if the latter is not provided
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
reqMetadata := requestMetadata{
bucketName: bucketName,