summary refs log tree commit diff stats
path: root/vendor/github.com/minio/minio-go/api-put-object-multipart.go
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/minio/minio-go/api-put-object-multipart.go')
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go  140
1 files changed, 122 insertions, 18 deletions
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index f74eae626..5134a7fd2 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -66,6 +66,112 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
}
+// putObjectMultipartStreamNoChecksum - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// N B We don't resume an incomplete multipart upload, we overwrite
+// existing parts of an incomplete upload.
+func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
+ reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
+
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, err := c.findUploadID(bucketName, objectName)
+ if err != nil {
+ return 0, err
+ }
+ if uploadID == "" {
+ // Initiates a new multipart request
+ uploadID, err = c.newUploadID(bucketName, objectName, metadata)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to server. should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ hookReader := newHook(reader, progress)
+
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize)
+ // For unknown size, Read EOF we break away.
+ // We do not have to upload till totalPartsCount.
+ if err == io.EOF && size < 0 {
+ break
+ }
+
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partSize
+ }
+
+ // Verify if we uploaded all the data.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
// putObjectStream uploads files bigger than 64MiB, and also supports
// special case where size is unknown i.e '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
@@ -107,16 +213,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
+ if c.overrideSignerType.IsV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
// Calculates hash sums while copying partSize bytes into tmpBuffer.
prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
- if rErr != nil {
- if rErr != io.EOF {
- return 0, rErr
- }
+ if rErr != nil && rErr != io.EOF {
+ return 0, rErr
}
var reader io.Reader
@@ -127,13 +231,13 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
part, ok := partsInfo[partNumber]
// Verify if part should be uploaded.
- if !ok || shouldUploadPart(objectPart{
+ if !ok || shouldUploadPart(ObjectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
}, uploadPartReq{PartNum: partNumber, Part: &part}) {
// Proceed to upload the part.
- var objPart objectPart
+ var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
// Reset the temporary buffer upon any error.
@@ -181,7 +285,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
@@ -253,25 +357,25 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
}
// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
- return objectPart{}, err
+ return ObjectPart{}, err
}
if err := isValidObjectName(objectName); err != nil {
- return objectPart{}, err
+ return ObjectPart{}, err
}
if size > maxPartSize {
- return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
+ return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
}
if size <= -1 {
- return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
+ return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if partNumber <= 0 {
- return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
+ return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
}
if uploadID == "" {
- return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
+ return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
@@ -295,15 +399,15 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
- return objectPart{}, err
+ return ObjectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Once successfully uploaded, return completed part.
- objPart := objectPart{}
+ objPart := ObjectPart{}
objPart.Size = size
objPart.PartNumber = partNumber
// Trim off the odd double quotes from ETag in the beginning and end.