Diffstat (limited to 'vendor/github.com/minio/minio-go/api-put-object-common.go')
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-common.go  225
1 file changed, 225 insertions, 0 deletions
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
new file mode 100644
index 000000000..2eaef2e30
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -0,0 +1,225 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+)
+
+// Verify if reader is *os.File
+func isFile(reader io.Reader) (ok bool) {
+ _, ok = reader.(*os.File)
+ return
+}
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+ _, ok = reader.(*Object)
+ return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+ _, ok = reader.(io.ReaderAt)
+ return
+}
+
+// shouldUploadPart - verify if part should be uploaded.
+func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
+ // If the part is not found, it should be uploaded.
+ uploadedPart, found := objectParts[objPart.PartNumber]
+ if !found {
+ return true
+ }
+ // If the size does not match, the part should be uploaded.
+ if objPart.Size != uploadedPart.Size {
+ return true
+ }
+ // If the md5sum does not match, the part should be uploaded.
+ if objPart.ETag != uploadedPart.ETag {
+ return true
+ }
+ return false
+}
+
+// optimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: The assumption here is that for any object uploaded to any S3 compatible
+// object storage, the following parameters are treated as constants.
+//
+// maxPartsCount - 10000
+// minPartSize - 5MiB
+// maxMultipartPutObjectSize - 5TiB
+//
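+// For example, a 16 GiB object works out to a part size of 5 MiB,
+// 3277 parts in total, and a final part of 4 MiB.
+//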
+func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+ // If object size is '-1', set it to the maximum allowed 5TiB.
+ if objectSize == -1 {
+ objectSize = maxMultipartPutObjectSize
+ }
+ // Error out if object size is larger than the supported maximum.
+ if objectSize > maxMultipartPutObjectSize {
+ err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+ return
+ }
+ // Use floats for part size for all calculations to avoid
+ // overflows during float64 to int64 conversions.
+ partSizeFlt := math.Ceil(float64(objectSize) / float64(maxPartsCount))
+ partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
+ // Total parts count.
+ totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+ // Part size.
+ partSize = int64(partSizeFlt)
+ // Last part size.
+ lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+ return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// hashCopyBuffer is identical to hashCopyN except that it takes a buffer
+// argument instead of a size argument, and the reader must implement the
+// io.ReaderAt interface.
+//
+// Reads are staged at increasing offsets into the buffer; if the buffer is
+// nil it is initialized to optimalReadBufferSize.
+func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
+ hashWriter := writer
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Buffer is nil, initialize.
+ if buf == nil {
+ buf = make([]byte, optimalReadBufferSize)
+ }
+
+ // Offset to start reading from.
+ var readAtOffset int64
+
+ // The following block reads data at increasing offsets from the input
+ // reader and copies it to the hash writer.
+ for {
+ readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ return 0, rerr
+ }
+ }
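+ // A short final read can return data together with io.EOF; whatever was
+ // read is still written below before the loop exits.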
+ writeSize, werr := hashWriter.Write(buf[:readAtSize])
+ if werr != nil {
+ return 0, werr
+ }
+ if readAtSize != writeSize {
+ return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
+ }
+ readAtOffset += int64(writeSize)
+ size += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, err
+}
+
+// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
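+// If the reader is exhausted before partSize bytes are read, the bytes copied
+// so far are hashed and returned along with io.EOF.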
+func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
+ hashWriter := writer
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // Copy up to partSize bytes from the reader to the hash writer.
+ size, err = io.CopyN(hashWriter, reader, partSize)
+ if err != nil {
+ // If not EOF return error right here.
+ if err != io.EOF {
+ return 0, err
+ }
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, err
+}
+
+// getUploadID - fetch upload id if already present for an object name
+// or initiate a new request to fetch a new upload id.
+func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", false, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return "", false, err
+ }
+
+ // Set contentType to the default if it is an empty string.
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // Find upload id for previous upload for an object.
+ uploadID, err = c.findUploadID(bucketName, objectName)
+ if err != nil {
+ return "", false, err
+ }
+ if uploadID == "" {
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+ if err != nil {
+ return "", false, err
+ }
+ // Save the new upload id.
+ uploadID = initMultipartUploadResult.UploadID
+ // Indicate that this is a new upload id.
+ isNew = true
+ }
+ return uploadID, isNew, nil
+}
+
+// computeHash - Calculates chosen hashes for an input io.ReadSeeker.
+func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
+ hashWriter := ioutil.Discard
+ for _, v := range hashAlgorithms {
+ hashWriter = io.MultiWriter(hashWriter, v)
+ }
+
+ // No staging buffer is needed here, so just use io.Copy.
+ size, err = io.Copy(hashWriter, reader)
+ if err != nil {
+ return 0, err
+ }
+
+ // Seek back reader to the beginning location.
+ if _, err := reader.Seek(0, 0); err != nil {
+ return 0, err
+ }
+
+ for k, v := range hashAlgorithms {
+ hashSums[k] = v.Sum(nil)
+ }
+ return size, nil
+}
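
As a rough, illustrative sketch (not part of the vendored file), the helpers above could drive a resumable part loop along the following lines. The function name sketchPutObjectParts, the uploadedParts map, and the md5-hex ETag wiring are assumptions made for the example; imports of bytes, crypto/md5, encoding/hex, hash and io are assumed, and the call that actually sends each part is elided.

// Hypothetical sketch, assumed to live alongside this file in package minio.
func sketchPutObjectParts(reader io.Reader, objectSize int64, uploadedParts map[int]objectPart) error {
	totalParts, partSize, lastPartSize, err := optimalPartInfo(objectSize)
	if err != nil {
		return err
	}
	for partNumber := 1; partNumber <= totalParts; partNumber++ {
		curSize := partSize
		if partNumber == totalParts {
			curSize = lastPartSize
		}
		// Fresh hash state per part; hashCopyN fills hashSums while copying
		// the part into partBuf.
		hashAlgos := map[string]hash.Hash{"md5": md5.New()}
		hashSums := map[string][]byte{}
		var partBuf bytes.Buffer
		if _, err := hashCopyN(hashAlgos, hashSums, &partBuf, reader, curSize); err != nil && err != io.EOF {
			return err
		}
		// Skip a part that was already uploaded with the same size and ETag.
		part := objectPart{PartNumber: partNumber, Size: curSize, ETag: hex.EncodeToString(hashSums["md5"])}
		if !shouldUploadPart(part, uploadedParts) {
			continue
		}
		// ... upload partBuf.Bytes() as part number partNumber here ...
	}
	return nil
}

In the real client, uploadedParts would presumably be populated by listing the parts of an existing upload id returned by getUploadID, so that only missing or changed parts are re-sent.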