Diffstat (limited to 'vendor/github.com/minio/minio-go/api-put-object-common.go')
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-common.go | 141
1 file changed, 4 insertions(+), 137 deletions(-)
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
index 68a459f4a..833f1fe8f 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -17,12 +17,12 @@
package minio
import (
- "fmt"
"hash"
"io"
- "io/ioutil"
"math"
"os"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Verify if reader is *os.File
@@ -43,23 +43,6 @@ func isReadAt(reader io.Reader) (ok bool) {
return
}
-// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool {
- // If the part is not found, it should be uploaded.
- if uploadReq.Part == nil {
- return true
- }
- // If the size mismatches, the part should be uploaded.
- if objPart.Size != uploadReq.Part.Size {
- return true
- }
- // If the md5sum mismatches, the part should be uploaded.
- if objPart.ETag != uploadReq.Part.ETag {
- return true
- }
- return false
-}
-
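The removed shouldUploadPart decided whether a staged part had to be (re)sent by comparing it against the server-side part listing. A minimal standalone sketch of that decision, with the types reduced to just the compared fields (not the library's actual ObjectPart/uploadPartReq definitions):

```go
package main

import "fmt"

// part carries only the fields the removed helper compared.
type part struct {
	Size int64
	ETag string
}

// shouldUpload reports whether a staged part must be (re)uploaded:
// always when the server-side listing has no entry for it, and again
// whenever the size or md5-based ETag disagrees with that listing.
func shouldUpload(staged part, listed *part) bool {
	if listed == nil {
		return true
	}
	return staged.Size != listed.Size || staged.ETag != listed.ETag
}

func main() {
	p := part{Size: 5 << 20, ETag: "9a0364b9e99bb480dd25e1f0284c8555"}
	fmt.Println(shouldUpload(p, nil))                               // true: never uploaded
	fmt.Println(shouldUpload(p, &part{Size: p.Size, ETag: p.ETag})) // false: already there
}
```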
// optimalPartInfo - calculate the optimal part info for a given
// object size.
//
@@ -93,55 +76,6 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
return totalPartsCount, partSize, lastPartSize, nil
}
-// hashCopyBuffer is identical to hashCopyN except that it takes no
-// size argument; instead it takes a buffer argument, and the reader
-// must implement the io.ReaderAt interface.
-//
-// It stages reads from offsets into the buffer; if the buffer is nil,
-// it is initialized to optimalReadBufferSize.
-func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
- hashWriter := writer
- for _, v := range hashAlgorithms {
- hashWriter = io.MultiWriter(hashWriter, v)
- }
-
- // Buffer is nil, initialize.
- if buf == nil {
- buf = make([]byte, optimalReadBufferSize)
- }
-
- // Offset to start reading from.
- var readAtOffset int64
-
- // The following block reads data at an offset from the input
- // reader and copies it into the hash writer.
- for {
- readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
- if rerr != nil {
- if rerr != io.EOF {
- return 0, rerr
- }
- }
- writeSize, werr := hashWriter.Write(buf[:readAtSize])
- if werr != nil {
- return 0, werr
- }
- if readAtSize != writeSize {
- return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
- }
- readAtOffset += int64(writeSize)
- size += int64(writeSize)
- if rerr == io.EOF {
- break
- }
- }
-
- for k, v := range hashAlgorithms {
- hashSums[k] = v.Sum(nil)
- }
- return size, err
-}
-
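The deleted hashCopyBuffer drove a set of hashes from an io.ReaderAt with a manual ReadAt loop. The same effect falls out of the standard library; a minimal sketch using io.MultiWriter and io.Copy (the hash choices and input are illustrative):

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"fmt"
	"hash"
	"io"
	"strings"
)

func main() {
	hashes := map[string]hash.Hash{"md5": md5.New(), "sha256": sha256.New()}
	writers := make([]io.Writer, 0, len(hashes))
	for _, h := range hashes {
		writers = append(writers, h)
	}
	// io.Copy feeds every hash at once through io.MultiWriter,
	// replacing the manual ReadAt loop of the removed helper.
	n, err := io.Copy(io.MultiWriter(writers...), strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	for name, h := range hashes {
		fmt.Printf("%s (%d bytes): %x\n", name, n, h.Sum(nil))
	}
}
```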
// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
hashWriter := writer
@@ -168,10 +102,10 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
// or initiate a new request to fetch a new upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return "", err
}
@@ -182,70 +116,3 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
}
return initMultipartUploadResult.UploadID, nil
}
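The isValidBucketName/isValidObjectName helpers used above were replaced by their exported counterparts in pkg/s3utils, which this diff now imports. A quick sketch of calling those checks directly (the rejected inputs are illustrative):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	// CheckValidBucketName enforces S3 bucket naming rules; uppercase
	// letters and underscores, for example, are rejected.
	if err := s3utils.CheckValidBucketName("My_Bucket"); err != nil {
		fmt.Println("bucket rejected:", err)
	}
	// CheckValidObjectName rejects empty names (and invalid UTF-8).
	if err := s3utils.CheckValidObjectName(""); err != nil {
		fmt.Println("object rejected:", err)
	}
}
```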
-
-// getMpartUploadSession returns the upload ID and the uploaded parts needed to continue a previous upload session,
-// or initiates a new multipart session if none is found.
-func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) {
- // A map of all uploaded parts.
- var partsInfo map[int]ObjectPart
- var err error
-
- uploadID, err := c.findUploadID(bucketName, objectName)
- if err != nil {
- return "", nil, err
- }
-
- if uploadID == "" {
- // Initiates a new multipart request
- uploadID, err = c.newUploadID(bucketName, objectName, metaData)
- if err != nil {
- return "", nil, err
- }
- } else {
- // Fetch previously uploaded parts and the maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- // When the server returns NoSuchUpload even though it previously acknowledged the existence of the upload ID,
- // initiate a new multipart upload.
- if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
- uploadID, err = c.newUploadID(bucketName, objectName, metaData)
- if err != nil {
- return "", nil, err
- }
- } else {
- return "", nil, err
- }
- }
- }
-
- // Allocate partsInfo if not done yet
- if partsInfo == nil {
- partsInfo = make(map[int]ObjectPart)
- }
-
- return uploadID, partsInfo, nil
-}
-
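The resume logic above hinged on recognizing a NoSuchUpload error by asserting the concrete ErrorResponse type. That idiom in isolation, with a simplified stand-in for the library's ErrorResponse:

```go
package main

import (
	"errors"
	"fmt"
)

// errorResponse is a simplified stand-in for minio-go's ErrorResponse.
type errorResponse struct{ Code string }

func (e errorResponse) Error() string { return e.Code }

// isNoSuchUpload reports whether err carries the "NoSuchUpload" code,
// the condition the removed code used to fall back to a fresh upload ID.
func isNoSuchUpload(err error) bool {
	respErr, ok := err.(errorResponse)
	return ok && respErr.Code == "NoSuchUpload"
}

func main() {
	fmt.Println(isNoSuchUpload(errorResponse{Code: "NoSuchUpload"})) // true
	fmt.Println(isNoSuchUpload(errors.New("timeout")))               // false
}
```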
-// computeHash - Calculates hashes for an input read Seeker.
-func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
- hashWriter := ioutil.Discard
- for _, v := range hashAlgorithms {
- hashWriter = io.MultiWriter(hashWriter, v)
- }
-
- // Since no buffer is provided here, there is no need to allocate one; just use io.Copy.
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return 0, err
- }
-
- // Seek the reader back to the beginning.
- if _, err := reader.Seek(0, 0); err != nil {
- return 0, err
- }
-
- for k, v := range hashAlgorithms {
- hashSums[k] = v.Sum(nil)
- }
- return size, nil
-}
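computeHash above pairs an io.Copy into the hashes with a rewind so the caller can reread the data. A single-hash sketch of the same pattern; io.SeekStart is the named constant for the removed code's Seek(0, 0):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// hashAndRewind mirrors the removed computeHash for a single hash:
// copy everything into the hash, then seek back so the caller can
// reuse the reader.
func hashAndRewind(r io.ReadSeeker) ([]byte, int64, error) {
	h := sha256.New()
	n, err := io.Copy(h, r)
	if err != nil {
		return nil, 0, err
	}
	// io.SeekStart is the modern spelling of Seek(0, 0).
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return nil, 0, err
	}
	return h.Sum(nil), n, nil
}

func main() {
	sum, n, err := hashAndRewind(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, sha256=%x\n", n, sum)
}
```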