Diffstat (limited to 'vendor/github.com/goamz/goamz/s3')
 vendor/github.com/goamz/goamz/s3/export_test.go    |   17 -
 vendor/github.com/goamz/goamz/s3/multi.go          |  439 -
 vendor/github.com/goamz/goamz/s3/multi_test.go     |  373 -
 vendor/github.com/goamz/goamz/s3/responses_test.go |  202 -
 vendor/github.com/goamz/goamz/s3/s3.go             | 1164 -
 vendor/github.com/goamz/goamz/s3/s3_test.go        |  427 -
 vendor/github.com/goamz/goamz/s3/s3i_test.go       |  590 -
 vendor/github.com/goamz/goamz/s3/s3t_test.go       |   83 -
 vendor/github.com/goamz/goamz/s3/s3test/server.go  |  642 -
 vendor/github.com/goamz/goamz/s3/sign.go           |  141 -
 vendor/github.com/goamz/goamz/s3/sign_test.go      |  132 -
 11 files changed, 0 insertions(+), 4210 deletions(-)
diff --git a/vendor/github.com/goamz/goamz/s3/export_test.go b/vendor/github.com/goamz/goamz/s3/export_test.go
deleted file mode 100644
index 4ff913cde..000000000
--- a/vendor/github.com/goamz/goamz/s3/export_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package s3
-
-import (
- "github.com/goamz/goamz/aws"
-)
-
-func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
- sign(auth, method, path, params, headers)
-}
-
-func SetListPartsMax(n int) {
- listPartsMax = n
-}
-
-func SetListMultiMax(n int) {
- listMultiMax = n
-}
diff --git a/vendor/github.com/goamz/goamz/s3/multi.go b/vendor/github.com/goamz/goamz/s3/multi.go
deleted file mode 100644
index 348ead300..000000000
--- a/vendor/github.com/goamz/goamz/s3/multi.go
+++ /dev/null
@@ -1,439 +0,0 @@
-package s3
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "encoding/hex"
- "encoding/xml"
- "errors"
- "io"
- "sort"
- "strconv"
-)
-
-// Multi represents an unfinished multipart upload.
-//
-// Multipart uploads allow sending big objects in smaller chunks.
-// After all parts have been sent, the upload must be explicitly
-// completed by calling Complete with the list of parts.
-//
-// See http://goo.gl/vJfTG for an overview of multipart uploads.
-type Multi struct {
- Bucket *Bucket
- Key string
- UploadId string
-}
-
-// That's the default. Here just for testing.
-var listMultiMax = 1000
-
-type listMultiResp struct {
- NextKeyMarker string
- NextUploadIdMarker string
- IsTruncated bool
- Upload []Multi
- CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
-}
-
-// ListMulti returns the list of unfinished multipart uploads in b.
-//
-// The prefix parameter limits the response to keys that begin with the
-// specified prefix. You can use prefixes to separate a bucket into different
-// groupings of keys (to get the feeling of folders, for example).
-//
-// The delim parameter causes the response to group all of the keys that
-// share a common prefix up to the next delimiter in a single entry within
-// the CommonPrefixes field. You can use delimiters to separate a bucket
-// into different groupings of keys, similar to how folders would work.
-//
-// See http://goo.gl/ePioY for details.
-func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
- params := map[string][]string{
- "uploads": {""},
- "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
- "prefix": {prefix},
- "delimiter": {delim},
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "GET",
- bucket: b.Name,
- params: params,
- }
- var resp listMultiResp
- err := b.S3.query(req, &resp)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, nil, err
- }
- for i := range resp.Upload {
- multi := &resp.Upload[i]
- multi.Bucket = b
- multis = append(multis, multi)
- }
- prefixes = append(prefixes, resp.CommonPrefixes...)
- if !resp.IsTruncated {
- return multis, prefixes, nil
- }
- params["key-marker"] = []string{resp.NextKeyMarker}
- params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
- attempt = b.S3.AttemptStrategy.Start() // Last request worked.
- }
- panic("unreachable")
-}
-
-// Multi returns a multipart upload handler for the provided key
-// inside b. If a multipart upload exists for key, it is returned,
-// otherwise a new multipart upload is initiated with contType and perm.
-func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
- multis, _, err := b.ListMulti(key, "")
- if err != nil && !hasCode(err, "NoSuchUpload") {
- return nil, err
- }
- for _, m := range multis {
- if m.Key == key {
- return m, nil
- }
- }
- return b.InitMulti(key, contType, perm)
-}
-
-// InitMulti initializes a new multipart upload at the provided
-// key inside b and returns a value for manipulating it.
-//
-// See http://goo.gl/XP8kL for details.
-func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
- headers := map[string][]string{
- "Content-Type": {contType},
- "Content-Length": {"0"},
- "x-amz-acl": {string(perm)},
- }
- params := map[string][]string{
- "uploads": {""},
- }
- req := &request{
- method: "POST",
- bucket: b.Name,
- path: key,
- headers: headers,
- params: params,
- }
- var err error
- var resp struct {
- UploadId string `xml:"UploadId"`
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, &resp)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
-}
-
-// PutPart sends part n of the multipart upload, reading all the content from r.
-// Each part, except for the last one, must be at least 5MB in size.
-//
-// See http://goo.gl/pqZer for details.
-func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
- partSize, _, md5b64, err := seekerInfo(r)
- if err != nil {
- return Part{}, err
- }
- return m.putPart(n, r, partSize, md5b64)
-}
-
-func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(partSize, 10)},
- "Content-MD5": {md5b64},
- }
- params := map[string][]string{
- "uploadId": {m.UploadId},
- "partNumber": {strconv.FormatInt(int64(n), 10)},
- }
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- _, err := r.Seek(0, 0)
- if err != nil {
- return Part{}, err
- }
- req := &request{
- method: "PUT",
- bucket: m.Bucket.Name,
- path: m.Key,
- headers: headers,
- params: params,
- payload: r,
- }
- err = m.Bucket.S3.prepare(req)
- if err != nil {
- return Part{}, err
- }
- resp, err := m.Bucket.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return Part{}, err
- }
- etag := resp.Header.Get("ETag")
- if etag == "" {
- return Part{}, errors.New("part upload succeeded with no ETag")
- }
- return Part{n, etag, partSize}, nil
- }
- panic("unreachable")
-}
-
-func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
- _, err = r.Seek(0, 0)
- if err != nil {
- return 0, "", "", err
- }
- digest := md5.New()
- size, err = io.Copy(digest, r)
- if err != nil {
- return 0, "", "", err
- }
- sum := digest.Sum(nil)
- md5hex = hex.EncodeToString(sum)
- md5b64 = base64.StdEncoding.EncodeToString(sum)
- return size, md5hex, md5b64, nil
-}
-
-type Part struct {
- N int `xml:"PartNumber"`
- ETag string
- Size int64
-}
-
-type partSlice []Part
-
-func (s partSlice) Len() int { return len(s) }
-func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
-func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type listPartsResp struct {
- NextPartNumberMarker string
- IsTruncated bool
- Part []Part
-}
-
-// That's the default. Here just for testing.
-var listPartsMax = 1000
-
-// ListParts returns the list of previously uploaded parts in m,
-// ordered by part number.
-//
-// See http://goo.gl/ePioY for details.
-func (m *Multi) ListParts() ([]Part, error) {
- params := map[string][]string{
- "uploadId": {m.UploadId},
- "max-parts": {strconv.FormatInt(int64(listPartsMax), 10)},
- }
- var parts partSlice
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "GET",
- bucket: m.Bucket.Name,
- path: m.Key,
- params: params,
- }
- var resp listPartsResp
- err := m.Bucket.S3.query(req, &resp)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- parts = append(parts, resp.Part...)
- if !resp.IsTruncated {
- sort.Sort(parts)
- return parts, nil
- }
- params["part-number-marker"] = []string{resp.NextPartNumberMarker}
- attempt = m.Bucket.S3.AttemptStrategy.Start() // Last request worked.
- }
- panic("unreachable")
-}
-
-type ReaderAtSeeker interface {
- io.ReaderAt
- io.ReadSeeker
-}
-
-// PutAll sends all of r via a multipart upload with parts no larger
-// than partSize bytes, which must be set to at least 5MB.
-// Parts previously uploaded are either reused if their checksum
-// and size match the new part, or otherwise overwritten with the
-// new content.
-// PutAll returns all the parts of m (reused or not).
-func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
- old, err := m.ListParts()
- if err != nil && !hasCode(err, "NoSuchUpload") {
- return nil, err
- }
- reuse := 0 // Index of next old part to consider reusing.
- current := 1 // Part number of latest good part handled.
- totalSize, err := r.Seek(0, 2)
- if err != nil {
- return nil, err
- }
- first := true // Must send at least one empty part if the file is empty.
- var result []Part
-NextSection:
- for offset := int64(0); offset < totalSize || first; offset += partSize {
- first = false
- if offset+partSize > totalSize {
- partSize = totalSize - offset
- }
- section := io.NewSectionReader(r, offset, partSize)
- _, md5hex, md5b64, err := seekerInfo(section)
- if err != nil {
- return nil, err
- }
- for reuse < len(old) && old[reuse].N <= current {
- // Looks like this part was already sent.
- part := &old[reuse]
- etag := `"` + md5hex + `"`
- if part.N == current && part.Size == partSize && part.ETag == etag {
- // Checksum matches. Reuse the old part.
- result = append(result, *part)
- current++
- continue NextSection
- }
- reuse++
- }
-
- // Part wasn't found or doesn't match. Send it.
- part, err := m.putPart(current, section, partSize, md5b64)
- if err != nil {
- return nil, err
- }
- result = append(result, part)
- current++
- }
- return result, nil
-}
-
-type completeUpload struct {
- XMLName xml.Name `xml:"CompleteMultipartUpload"`
- Parts completeParts `xml:"Part"`
-}
-
-type completePart struct {
- PartNumber int
- ETag string
-}
-
-type completeParts []completePart
-
-func (p completeParts) Len() int { return len(p) }
-func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
-func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-type completeResponse struct {
- // The element name: should be either CompleteMultipartUploadResult or Error.
- XMLName xml.Name
- // If the element was error, then it should have the following:
- Code string
- Message string
- RequestId string
- HostId string
-}
-
-// Complete assembles the given previously uploaded parts into the
-// final object. This operation may take several minutes.
-//
-// The complete call to AMZ may still fail after returning HTTP 200,
-// so even though it's unused, the body of the reply must be unmarshalled
-// and checked to see whether or not the complete succeeded.
-//
-// See http://goo.gl/2Z7Tw for details.
-func (m *Multi) Complete(parts []Part) error {
- params := map[string][]string{
- "uploadId": {m.UploadId},
- }
- c := completeUpload{}
- for _, p := range parts {
- c.Parts = append(c.Parts, completePart{p.N, p.ETag})
- }
- sort.Sort(c.Parts)
- data, err := xml.Marshal(&c)
- if err != nil {
- return err
- }
-
- // Setting Content-Length prevents breakage on DreamObjects
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "POST",
- bucket: m.Bucket.Name,
- path: m.Key,
- params: params,
- payload: bytes.NewReader(data),
- headers: map[string][]string{
- "Content-Length": []string{strconv.Itoa(len(data))},
- },
- }
-
- resp := &completeResponse{}
- err := m.Bucket.S3.query(req, resp)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err == nil && resp.XMLName.Local == "Error" {
- err = &Error{
- StatusCode: 200,
- Code: resp.Code,
- Message: resp.Message,
- RequestId: resp.RequestId,
- HostId: resp.HostId,
- }
- }
- return err
- }
- panic("unreachable")
-}
-
-// Abort deletes an unfinished multipart upload and any previously
-// uploaded parts for it.
-//
-// After a multipart upload is aborted, no additional parts can be
-// uploaded using it. However, if any part uploads are currently in
-// progress, those part uploads might or might not succeed. As a result,
-// it might be necessary to abort a given multipart upload multiple
-// times in order to completely free all storage consumed by all parts.
-//
-// NOTE: If the described scenario happens to you, please report back to
-// the goamz authors with details. In the future such retrying should be
-// handled internally, but it's not clear what happens precisely (Is an
-// error returned? Is the issue completely undetectable?).
-//
-// See http://goo.gl/dnyJw for details.
-func (m *Multi) Abort() error {
- params := map[string][]string{
- "uploadId": {m.UploadId},
- }
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "DELETE",
- bucket: m.Bucket.Name,
- path: m.Key,
- params: params,
- }
- err := m.Bucket.S3.query(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- return err
- }
- panic("unreachable")
-}
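Taken together, the API deleted above composed into a short client flow: initiate the upload, send the parts, then complete (or abort on failure). The following standalone sketch is illustrative, not code from the diff; the credentials, region, bucket, and key are placeholders:

    package main

    import (
        "log"
        "strings"

        "github.com/goamz/goamz/aws"
        "github.com/goamz/goamz/s3"
    )

    func main() {
        auth := aws.Auth{AccessKey: "AKID", SecretKey: "SECRET"} // placeholder credentials
        b := s3.New(auth, aws.USEast).Bucket("sample")           // placeholder region/bucket

        multi, err := b.InitMulti("big-object", "text/plain", s3.Private)
        if err != nil {
            log.Fatal(err)
        }
        // Each part except the last must be at least 5MB; a single
        // small part is fine for illustration.
        part, err := multi.PutPart(1, strings.NewReader("part contents"))
        if err != nil {
            multi.Abort() // free the storage consumed by uploaded parts
            log.Fatal(err)
        }
        if err := multi.Complete([]s3.Part{part}); err != nil {
            log.Fatal(err)
        }
    }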
diff --git a/vendor/github.com/goamz/goamz/s3/multi_test.go b/vendor/github.com/goamz/goamz/s3/multi_test.go
deleted file mode 100644
index 5c788d9cc..000000000
--- a/vendor/github.com/goamz/goamz/s3/multi_test.go
+++ /dev/null
@@ -1,373 +0,0 @@
-package s3_test
-
-import (
- "encoding/xml"
- "io"
- "io/ioutil"
- "strings"
-
- "github.com/goamz/goamz/s3"
- . "gopkg.in/check.v1"
-)
-
-func (s *S) TestInitMulti(c *C) {
- testServer.Response(200, nil, InitMultiResultDump)
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "POST")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
- c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
- c.Assert(req.Form["uploads"], DeepEquals, []string{""})
-
- c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-}
-
-func (s *S) TestMultiNoPreviousUpload(c *C) {
- // Don't retry the NoSuchUpload error.
- s.DisableRetries()
-
- testServer.Response(404, nil, NoSuchUploadErrorDump)
- testServer.Response(200, nil, InitMultiResultDump)
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.Multi("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/")
- c.Assert(req.Form["uploads"], DeepEquals, []string{""})
- c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})
-
- req = testServer.WaitRequest()
- c.Assert(req.Method, Equals, "POST")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form["uploads"], DeepEquals, []string{""})
-
- c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-}
-
-func (s *S) TestMultiReturnOld(c *C) {
- testServer.Response(200, nil, ListMultiResultDump)
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.Multi("multi1", "text/plain", s3.Private)
- c.Assert(err, IsNil)
- c.Assert(multi.Key, Equals, "multi1")
- c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/")
- c.Assert(req.Form["uploads"], DeepEquals, []string{""})
- c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"})
-}
-
-func (s *S) TestListParts(c *C) {
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(200, nil, ListPartsResultDump1)
- testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
- testServer.Response(200, nil, ListPartsResultDump2)
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- parts, err := multi.ListParts()
- c.Assert(err, IsNil)
- c.Assert(parts, HasLen, 3)
- c.Assert(parts[0].N, Equals, 1)
- c.Assert(parts[0].Size, Equals, int64(5))
- c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
- c.Assert(parts[1].N, Equals, 2)
- c.Assert(parts[1].Size, Equals, int64(5))
- c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
- c.Assert(parts[2].N, Equals, 3)
- c.Assert(parts[2].Size, Equals, int64(5))
- c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
- testServer.WaitRequest()
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
- c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
-
- testServer.WaitRequest() // The internal error.
- req = testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
- c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
- c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"})
-}
-
-func (s *S) TestPutPart(c *C) {
- headers := map[string]string{
- "ETag": `"26f90efd10d614f100252ff56d88dad8"`,
- }
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(200, headers, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
- c.Assert(err, IsNil)
- c.Assert(part.N, Equals, 1)
- c.Assert(part.Size, Equals, int64(8))
- c.Assert(part.ETag, Equals, headers["ETag"])
-
- testServer.WaitRequest()
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
- c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"})
- c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
-}
-
-func readAll(r io.Reader) string {
- data, err := ioutil.ReadAll(r)
- if err != nil {
- panic(err)
- }
- return string(data)
-}
-
-func (s *S) TestPutAllNoPreviousUpload(c *C) {
- // Don't retry the NoSuchUpload error.
- s.DisableRetries()
-
- etag1 := map[string]string{"ETag": `"etag1"`}
- etag2 := map[string]string{"ETag": `"etag2"`}
- etag3 := map[string]string{"ETag": `"etag3"`}
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(404, nil, NoSuchUploadErrorDump)
- testServer.Response(200, etag1, "")
- testServer.Response(200, etag2, "")
- testServer.Response(200, etag3, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
- c.Assert(parts, HasLen, 3)
- c.Assert(parts[0].ETag, Equals, `"etag1"`)
- c.Assert(parts[1].ETag, Equals, `"etag2"`)
- c.Assert(parts[2].ETag, Equals, `"etag3"`)
- c.Assert(err, IsNil)
-
- // Init
- testServer.WaitRequest()
-
- // List old parts. Won't find anything.
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
-
- // Send part 1.
- req = testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
- c.Assert(readAll(req.Body), Equals, "part1")
-
- // Send part 2.
- req = testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
- c.Assert(readAll(req.Body), Equals, "part2")
-
- // Send part 3 with shorter body.
- req = testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
- c.Assert(readAll(req.Body), Equals, "last")
-}
-
-func (s *S) TestPutAllZeroSizeFile(c *C) {
- // Don't retry the NoSuchUpload error.
- s.DisableRetries()
-
- etag1 := map[string]string{"ETag": `"etag1"`}
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(404, nil, NoSuchUploadErrorDump)
- testServer.Response(200, etag1, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- // Must send at least one part, so that completing it will work.
- parts, err := multi.PutAll(strings.NewReader(""), 5)
- c.Assert(parts, HasLen, 1)
- c.Assert(parts[0].ETag, Equals, `"etag1"`)
- c.Assert(err, IsNil)
-
- // Init
- testServer.WaitRequest()
-
- // List old parts. Won't find anything.
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
-
- // Send empty part.
- req = testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
- c.Assert(readAll(req.Body), Equals, "")
-}
-
-func (s *S) TestPutAllResume(c *C) {
- etag2 := map[string]string{"ETag": `"etag2"`}
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(200, nil, ListPartsResultDump1)
- testServer.Response(200, nil, ListPartsResultDump2)
- testServer.Response(200, etag2, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- // "part1" and "part3" match the checksums in ResultDump1.
- // The middle one is a mismatch (it refers to "part2").
- parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
- c.Assert(parts, HasLen, 3)
- c.Assert(parts[0].N, Equals, 1)
- c.Assert(parts[0].Size, Equals, int64(5))
- c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
- c.Assert(parts[1].N, Equals, 2)
- c.Assert(parts[1].Size, Equals, int64(5))
- c.Assert(parts[1].ETag, Equals, `"etag2"`)
- c.Assert(parts[2].N, Equals, 3)
- c.Assert(parts[2].Size, Equals, int64(5))
- c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
- c.Assert(err, IsNil)
-
- // Init
- testServer.WaitRequest()
-
- // List old parts, broken in two requests.
- for i := 0; i < 2; i++ {
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- }
-
- // Send part 2, as it didn't match the checksum.
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
- c.Assert(readAll(req.Body), Equals, "partX")
-}
-
-func (s *S) TestMultiComplete(c *C) {
- testServer.Response(200, nil, InitMultiResultDump)
- // Note the 200 response. Completing will hold the connection on some
- // kind of long poll, and may return a late error even after a 200.
- testServer.Response(200, nil, InternalErrorDump)
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
- // returns InternalErrorDump in the payload, which should manifest as
- // an error.
- c.Assert(err, NotNil)
-
- testServer.WaitRequest()
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "POST")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-
- var payload struct {
- XMLName xml.Name
- Part []struct {
- PartNumber int
- ETag string
- }
- }
-
- dec := xml.NewDecoder(req.Body)
- err = dec.Decode(&payload)
- c.Assert(err, IsNil)
-
- c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
- c.Assert(len(payload.Part), Equals, 2)
- c.Assert(payload.Part[0].PartNumber, Equals, 1)
- c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
- c.Assert(payload.Part[1].PartNumber, Equals, 2)
- c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
-}
-
-func (s *S) TestMultiAbort(c *C) {
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
-
- err = multi.Abort()
- c.Assert(err, IsNil)
-
- testServer.WaitRequest()
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "DELETE")
- c.Assert(req.URL.Path, Equals, "/sample/multi")
- c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-}
-
-func (s *S) TestListMulti(c *C) {
- testServer.Response(200, nil, ListMultiResultDump)
-
- b := s.s3.Bucket("sample")
-
- multis, prefixes, err := b.ListMulti("", "/")
- c.Assert(err, IsNil)
- c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
- c.Assert(multis, HasLen, 2)
- c.Assert(multis[0].Key, Equals, "multi1")
- c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
- c.Assert(multis[1].Key, Equals, "multi2")
- c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/sample/")
- c.Assert(req.Form["uploads"], DeepEquals, []string{""})
- c.Assert(req.Form["prefix"], DeepEquals, []string{""})
- c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
- c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
-}
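The resume behaviour exercised by TestPutAllResume hinges on a protocol detail visible in putPart and PutAll above: for a part uploaded in a single PUT, S3's ETag is the hex MD5 of the part's bytes wrapped in double quotes, which is what lets PutAll skip parts whose local checksum matches. A minimal sketch of that comparison:

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
    )

    // etagFor mirrors the check in Multi.PutAll: the quoted hex MD5 of the bytes.
    func etagFor(data []byte) string {
        sum := md5.Sum(data)
        return `"` + hex.EncodeToString(sum[:]) + `"`
    }

    func main() {
        // "part1" matches part 1's ETag in ListPartsResultDump1 below.
        fmt.Println(etagFor([]byte("part1")) == `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) // true
    }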
diff --git a/vendor/github.com/goamz/goamz/s3/responses_test.go b/vendor/github.com/goamz/goamz/s3/responses_test.go
deleted file mode 100644
index 414ede0a7..000000000
--- a/vendor/github.com/goamz/goamz/s3/responses_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package s3_test
-
-var GetObjectErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
- <Code>NoSuchBucket</Code>
- <Message>The specified bucket does not exist</Message>
- <BucketName>non-existent-bucket</BucketName>
- <RequestId>3F1B667FAD71C3D8</RequestId>
- <HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId>
-</Error>
-`
-
-var GetListResultDump1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
- <Name>quotes</Name>
- <Prefix>N</Prefix>
- <IsTruncated>false</IsTruncated>
- <Contents>
- <Key>Nelson</Key>
- <LastModified>2006-01-01T12:00:00.000Z</LastModified>
- <ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
- <Size>5</Size>
- <StorageClass>STANDARD</StorageClass>
- <Owner>
- <ID>bcaf161ca5fb16fd081034f</ID>
- <DisplayName>webfile</DisplayName>
- </Owner>
- </Contents>
- <Contents>
- <Key>Neo</Key>
- <LastModified>2006-01-01T12:00:00.000Z</LastModified>
- <ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
- <Size>4</Size>
- <StorageClass>STANDARD</StorageClass>
- <Owner>
- <ID>bcaf1ffd86a5fb16fd081034f</ID>
- <DisplayName>webfile</DisplayName>
- </Owner>
- </Contents>
-</ListBucketResult>
-`
-
-var GetListResultDump2 = `
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <Name>example-bucket</Name>
- <Prefix>photos/2006/</Prefix>
- <Marker>some-marker</Marker>
- <MaxKeys>1000</MaxKeys>
- <Delimiter>/</Delimiter>
- <IsTruncated>false</IsTruncated>
-
- <CommonPrefixes>
- <Prefix>photos/2006/feb/</Prefix>
- </CommonPrefixes>
- <CommonPrefixes>
- <Prefix>photos/2006/jan/</Prefix>
- </CommonPrefixes>
-</ListBucketResult>
-`
-
-var InitMultiResultDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <Bucket>sample</Bucket>
- <Key>multi</Key>
- <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-</InitiateMultipartUploadResult>
-`
-
-var ListPartsResultDump1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <Bucket>sample</Bucket>
- <Key>multi</Key>
- <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
- <Initiator>
- <ID>bb5c0f63b0b25f2d099c</ID>
- <DisplayName>joe</DisplayName>
- </Initiator>
- <Owner>
- <ID>bb5c0f63b0b25f2d099c</ID>
- <DisplayName>joe</DisplayName>
- </Owner>
- <StorageClass>STANDARD</StorageClass>
- <PartNumberMarker>0</PartNumberMarker>
- <NextPartNumberMarker>2</NextPartNumberMarker>
- <MaxParts>2</MaxParts>
- <IsTruncated>true</IsTruncated>
- <Part>
- <PartNumber>1</PartNumber>
- <LastModified>2013-01-30T13:45:51.000Z</LastModified>
- <ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
- <Size>5</Size>
- </Part>
- <Part>
- <PartNumber>2</PartNumber>
- <LastModified>2013-01-30T13:45:52.000Z</LastModified>
- <ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
- <Size>5</Size>
- </Part>
-</ListPartsResult>
-`
-
-var ListPartsResultDump2 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <Bucket>sample</Bucket>
- <Key>multi</Key>
- <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
- <Initiator>
- <ID>bb5c0f63b0b25f2d099c</ID>
- <DisplayName>joe</DisplayName>
- </Initiator>
- <Owner>
- <ID>bb5c0f63b0b25f2d099c</ID>
- <DisplayName>joe</DisplayName>
- </Owner>
- <StorageClass>STANDARD</StorageClass>
- <PartNumberMarker>2</PartNumberMarker>
- <NextPartNumberMarker>3</NextPartNumberMarker>
- <MaxParts>2</MaxParts>
- <IsTruncated>false</IsTruncated>
- <Part>
- <PartNumber>3</PartNumber>
- <LastModified>2013-01-30T13:46:50.000Z</LastModified>
- <ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
- <Size>5</Size>
- </Part>
-</ListPartsResult>
-`
-
-var ListMultiResultDump = `
-<?xml version="1.0"?>
-<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
- <KeyMarker/>
- <UploadIdMarker/>
- <NextKeyMarker>multi1</NextKeyMarker>
- <NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
- <Delimiter>/</Delimiter>
- <MaxUploads>1000</MaxUploads>
- <IsTruncated>false</IsTruncated>
- <Upload>
- <Key>multi1</Key>
- <UploadId>iUVug89pPvSswrikD</UploadId>
- <Initiator>
- <ID>bb5c0f63b0b25f2d0</ID>
- <DisplayName>gustavoniemeyer</DisplayName>
- </Initiator>
- <Owner>
- <ID>bb5c0f63b0b25f2d0</ID>
- <DisplayName>gustavoniemeyer</DisplayName>
- </Owner>
- <StorageClass>STANDARD</StorageClass>
- <Initiated>2013-01-30T18:15:47.000Z</Initiated>
- </Upload>
- <Upload>
- <Key>multi2</Key>
- <UploadId>DkirwsSvPp98guVUi</UploadId>
- <Initiator>
- <ID>bb5c0f63b0b25f2d0</ID>
- <DisplayName>joe</DisplayName>
- </Initiator>
- <Owner>
- <ID>bb5c0f63b0b25f2d0</ID>
- <DisplayName>joe</DisplayName>
- </Owner>
- <StorageClass>STANDARD</StorageClass>
- <Initiated>2013-01-30T18:15:47.000Z</Initiated>
- </Upload>
- <CommonPrefixes>
- <Prefix>a/</Prefix>
- </CommonPrefixes>
- <CommonPrefixes>
- <Prefix>b/</Prefix>
- </CommonPrefixes>
-</ListMultipartUploadsResult>
-`
-
-var NoSuchUploadErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
- <Code>NoSuchUpload</Code>
- <Message>Not relevant</Message>
- <BucketName>sample</BucketName>
- <RequestId>3F1B667FAD71C3D8</RequestId>
- <HostId>kjhwqk</HostId>
-</Error>
-`
-
-var InternalErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
- <Code>InternalError</Code>
- <Message>Not relevant</Message>
- <BucketName>sample</BucketName>
- <RequestId>3F1B667FAD71C3D8</RequestId>
- <HostId>kjhwqk</HostId>
-</Error>
-`
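These fixtures feed the tests above; the package itself decodes such <Error> documents with encoding/xml. A minimal standalone sketch of that decoding (the s3Error type here is illustrative, mirroring the fields of completeResponse in multi.go):

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    // s3Error is illustrative; it mirrors the fields multi.go's
    // completeResponse reads out of an <Error> document.
    type s3Error struct {
        XMLName   xml.Name
        Code      string
        Message   string
        RequestId string
        HostId    string
    }

    func main() {
        doc := "<Error><Code>NoSuchUpload</Code><Message>Not relevant</Message>" +
            "<RequestId>3F1B667FAD71C3D8</RequestId><HostId>kjhwqk</HostId></Error>"
        var e s3Error
        if err := xml.Unmarshal([]byte(doc), &e); err != nil {
            panic(err)
        }
        fmt.Println(e.XMLName.Local, e.Code) // Error NoSuchUpload
    }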
diff --git a/vendor/github.com/goamz/goamz/s3/s3.go b/vendor/github.com/goamz/goamz/s3/s3.go
deleted file mode 100644
index f27479cb4..000000000
--- a/vendor/github.com/goamz/goamz/s3/s3.go
+++ /dev/null
@@ -1,1164 +0,0 @@
-//
-// goamz - Go packages to interact with the Amazon Web Services.
-//
-// https://wiki.ubuntu.com/goamz
-//
-// Copyright (c) 2011 Canonical Ltd.
-//
-// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
-//
-
-package s3
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/md5"
- "crypto/sha1"
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/goamz/goamz/aws"
-)
-
-const debug = false
-
-// The S3 type encapsulates operations with an S3 region.
-type S3 struct {
- aws.Auth
- aws.Region
-
- // ConnectTimeout is the maximum time a request attempt will
- // wait for a successful connection to be made.
- //
- // A value of zero means no timeout.
- ConnectTimeout time.Duration
-
- // ReadTimeout is the maximum time a request attempt will wait
- // for an individual read to complete.
- //
- // A value of zero means no timeout.
- ReadTimeout time.Duration
-
- // WriteTimeout is the maximum time a request attempt will
- // wait for an individual write to complete.
- //
- // A value of zero means no timeout.
- WriteTimeout time.Duration
-
- // RequestTimeout is the maximum time a request attempt can
- // take before operations return a timeout error.
- //
- // This includes connection time, any redirects, and reading
- // the response body. The timer remains running after the request
- // is made so it can interrupt reading of the response data.
- //
- // A Timeout of zero means no timeout.
- RequestTimeout time.Duration
-
- // AttemptStrategy is the attempt strategy used for requests.
- aws.AttemptStrategy
-
-	// Reserved for future use of private data.
- private byte
-
- // client used for requests
- client *http.Client
-}
-
-// The Bucket type encapsulates operations with an S3 bucket.
-type Bucket struct {
- *S3
- Name string
-}
-
-// The Owner type represents the owner of the object in an S3 bucket.
-type Owner struct {
- ID string
- DisplayName string
-}
-
-// Options holds the optional parameters for S3 requests, folded into a
-// single struct.
-type Options struct {
- SSE bool
- Meta map[string][]string
- ContentEncoding string
- CacheControl string
- RedirectLocation string
- ContentMD5 string
- // What else?
- // Content-Disposition string
- //// The following become headers so they are []strings rather than strings... I think
- // x-amz-storage-class []string
-}
-
-type CopyOptions struct {
- Options
- MetadataDirective string
- ContentType string
-}
-
-// CopyObjectResult is the output from a Copy request
-type CopyObjectResult struct {
- ETag string
- LastModified string
-}
-
-// DefaultAttemptStrategy is the default AttemptStrategy used by S3 objects created by New.
-var DefaultAttemptStrategy = aws.AttemptStrategy{
- Min: 5,
- Total: 5 * time.Second,
- Delay: 200 * time.Millisecond,
-}
-
-// New creates a new S3. Optional client argument allows for custom http.clients to be used.
-func New(auth aws.Auth, region aws.Region, client ...*http.Client) *S3 {
-
- var httpclient *http.Client
-
- if len(client) > 0 {
- httpclient = client[0]
- }
-
- return &S3{Auth: auth, Region: region, AttemptStrategy: DefaultAttemptStrategy, client: httpclient}
-}
-
-// Bucket returns a Bucket with the given name.
-func (s3 *S3) Bucket(name string) *Bucket {
- if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
- name = strings.ToLower(name)
- }
- return &Bucket{s3, name}
-}
-
-var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <LocationConstraint>%s</LocationConstraint>
-</CreateBucketConfiguration>`
-
-// locationConstraint returns an io.Reader specifying a LocationConstraint if
-// required for the region.
-//
-// See http://goo.gl/bh9Kq for details.
-func (s3 *S3) locationConstraint() io.Reader {
- constraint := ""
- if s3.Region.S3LocationConstraint {
- constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
- }
- return strings.NewReader(constraint)
-}
-
-type ACL string
-
-const (
- Private = ACL("private")
- PublicRead = ACL("public-read")
- PublicReadWrite = ACL("public-read-write")
- AuthenticatedRead = ACL("authenticated-read")
- BucketOwnerRead = ACL("bucket-owner-read")
- BucketOwnerFull = ACL("bucket-owner-full-control")
-)
-
-// PutBucket creates a new bucket.
-//
-// See http://goo.gl/ndjnR for details.
-func (b *Bucket) PutBucket(perm ACL) error {
- headers := map[string][]string{
- "x-amz-acl": {string(perm)},
- }
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: "/",
- headers: headers,
- payload: b.locationConstraint(),
- }
- return b.S3.query(req, nil)
-}
-
-// DelBucket removes an existing S3 bucket. All objects in the bucket must
-// be removed before the bucket itself can be removed.
-//
-// See http://goo.gl/GoBrY for details.
-func (b *Bucket) DelBucket() (err error) {
- req := &request{
- method: "DELETE",
- bucket: b.Name,
- path: "/",
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, nil)
- if !shouldRetry(err) {
- break
- }
- }
- return err
-}
-
-// Get retrieves an object from an S3 bucket.
-//
-// See http://goo.gl/isCO7 for details.
-func (b *Bucket) Get(path string) (data []byte, err error) {
- body, err := b.GetReader(path)
- defer func() {
- if body != nil {
- body.Close()
- }
- }()
- if err != nil {
- return nil, err
- }
- data, err = ioutil.ReadAll(body)
- return data, err
-}
-
-// GetReader retrieves an object from an S3 bucket,
-// returning the body of the HTTP response.
-// It is the caller's responsibility to call Close on rc when
-// finished reading.
-func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
- resp, err := b.GetResponse(path)
- if resp != nil {
- return resp.Body, err
- }
- return nil, err
-}
-
-// GetResponse retrieves an object from an S3 bucket,
-// returning the HTTP response.
-// It is the caller's responsibility to call Close on the response
-// body when finished reading.
-func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
- return b.GetResponseWithHeaders(path, make(http.Header))
-}
-
-// GetResponseWithHeaders retrieves an object from an S3 bucket,
-// sending the given custom headers with the request, and returns
-// the HTTP response.
-// It is the caller's responsibility to call Close on the response
-// body when finished reading.
-func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) {
- req := &request{
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- err = b.S3.prepare(req)
- if err != nil {
- return nil, err
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- return resp, nil
- }
- panic("unreachable")
-}
-
-// Exists checks whether or not an object exists on an S3 bucket using a HEAD request.
-func (b *Bucket) Exists(path string) (exists bool, err error) {
- req := &request{
- method: "HEAD",
- bucket: b.Name,
- path: path,
- }
- err = b.S3.prepare(req)
- if err != nil {
- return
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
-
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
-
- if err != nil {
-			// We can treat a 403 or 404 as non-existence
- if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
- return false, nil
- }
- return false, err
- }
-
- if resp.StatusCode/100 == 2 {
- exists = true
- }
- return exists, err
- }
- return false, fmt.Errorf("S3 Currently Unreachable")
-}
-
-// Head HEADs an object in the S3 bucket and returns the response
-// without a body. See http://bit.ly/17K1ylI for details.
-func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
- req := &request{
- method: "HEAD",
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- err := b.S3.prepare(req)
- if err != nil {
- return nil, err
- }
-
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- return resp, err
- }
- return nil, fmt.Errorf("S3 Currently Unreachable")
-}
-
-// Put inserts an object into the S3 bucket.
-//
-// See http://goo.gl/FEBPD for details.
-func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
- body := bytes.NewBuffer(data)
- return b.PutReader(path, body, int64(len(data)), contType, perm, options)
-}
-
-// PutCopy puts a copy of the object given by source into bucket b
-// under the target key path.
-func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (result *CopyObjectResult, err error) {
- headers := map[string][]string{
- "x-amz-acl": {string(perm)},
- "x-amz-copy-source": {source},
- }
- options.addHeaders(headers)
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- result = &CopyObjectResult{}
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, result)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return result, nil
-}
-
-/*
-PutHeader - like Put, inserts an object into the S3 bucket.
-Instead of Content-Type string, pass in custom headers to override defaults.
-*/
-func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
- body := bytes.NewBuffer(data)
- return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)
-}
-
-// PutReader inserts an object into the S3 bucket by consuming data
-// from r until EOF.
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(length, 10)},
- "Content-Type": {contType},
- "x-amz-acl": {string(perm)},
- }
- options.addHeaders(headers)
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: path,
- headers: headers,
- payload: r,
- }
- return b.S3.query(req, nil)
-}
-
-/*
-PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
-Instead of Content-Type string, pass in custom headers to override defaults.
-*/
-func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {
- // Default headers
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(length, 10)},
- "Content-Type": {"application/text"},
- "x-amz-acl": {string(perm)},
- }
-
- // Override with custom headers
- for key, value := range customHeaders {
- headers[key] = value
- }
-
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: path,
- headers: headers,
- payload: r,
- }
- return b.S3.query(req, nil)
-}
-
-// addHeaders adds o's specified fields to headers
-func (o Options) addHeaders(headers map[string][]string) {
- if o.SSE {
- headers["x-amz-server-side-encryption"] = []string{"AES256"}
- }
- if len(o.ContentEncoding) != 0 {
- headers["Content-Encoding"] = []string{o.ContentEncoding}
- }
- if len(o.CacheControl) != 0 {
- headers["Cache-Control"] = []string{o.CacheControl}
- }
- if len(o.ContentMD5) != 0 {
- headers["Content-MD5"] = []string{o.ContentMD5}
- }
- if len(o.RedirectLocation) != 0 {
- headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation}
- }
- for k, v := range o.Meta {
- headers["x-amz-meta-"+k] = v
- }
-}
-
-// addHeaders adds o's specified fields to headers
-func (o CopyOptions) addHeaders(headers map[string][]string) {
- o.Options.addHeaders(headers)
- if len(o.MetadataDirective) != 0 {
- headers["x-amz-metadata-directive"] = []string{o.MetadataDirective}
- }
- if len(o.ContentType) != 0 {
- headers["Content-Type"] = []string{o.ContentType}
- }
-}
-
-func makeXmlBuffer(doc []byte) *bytes.Buffer {
- buf := new(bytes.Buffer)
- buf.WriteString(xml.Header)
- buf.Write(doc)
- return buf
-}
-
-type RoutingRule struct {
- ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"`
- RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
- RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"`
-}
-
-type WebsiteConfiguration struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"`
- IndexDocumentSuffix string `xml:"IndexDocument>Suffix"`
- ErrorDocumentKey string `xml:"ErrorDocument>Key"`
- RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
-}
-
-func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
-
- doc, err := xml.Marshal(configuration)
- if err != nil {
- return err
- }
-
- buf := makeXmlBuffer(doc)
-
- return b.PutBucketSubresource("website", buf, int64(buf.Len()))
-}
-
-func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error {
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(length, 10)},
- }
- req := &request{
- path: "/",
- method: "PUT",
- bucket: b.Name,
- headers: headers,
- payload: r,
- params: url.Values{subresource: {""}},
- }
-
- return b.S3.query(req, nil)
-}
-
-// Del removes an object from the S3 bucket.
-//
-// See http://goo.gl/APeTt for details.
-func (b *Bucket) Del(path string) error {
- req := &request{
- method: "DELETE",
- bucket: b.Name,
- path: path,
- }
- return b.S3.query(req, nil)
-}
-
-type Delete struct {
- Quiet bool `xml:"Quiet,omitempty"`
- Objects []Object `xml:"Object"`
-}
-
-type Object struct {
- Key string `xml:"Key"`
- VersionId string `xml:"VersionId,omitempty"`
-}
-
-// DelMulti removes up to 1000 objects from the S3 bucket.
-//
-// See http://goo.gl/jx6cWK for details.
-func (b *Bucket) DelMulti(objects Delete) error {
- doc, err := xml.Marshal(objects)
- if err != nil {
- return err
- }
-
- buf := makeXmlBuffer(doc)
- digest := md5.New()
- size, err := digest.Write(buf.Bytes())
- if err != nil {
- return err
- }
-
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(int64(size), 10)},
- "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
- "Content-Type": {"text/xml"},
- }
- req := &request{
- path: "/",
- method: "POST",
- params: url.Values{"delete": {""}},
- bucket: b.Name,
- headers: headers,
- payload: buf,
- }
-
- return b.S3.query(req, nil)
-}
-
-// The ListResp type holds the results of a List bucket operation.
-type ListResp struct {
- Name string
- Prefix string
- Delimiter string
- Marker string
- NextMarker string
- MaxKeys int
-
- // IsTruncated is true if the results have been truncated because
- // there are more keys and prefixes than can fit in MaxKeys.
- // N.B. this is the opposite sense to that documented (incorrectly) in
- // http://goo.gl/YjQTc
- IsTruncated bool
- Contents []Key
- CommonPrefixes []string `xml:">Prefix"`
-}
-
-// The Key type represents an item stored in an S3 bucket.
-type Key struct {
- Key string
- LastModified string
- Size int64
- // ETag gives the hex-encoded MD5 sum of the contents,
- // surrounded with double-quotes.
- ETag string
- StorageClass string
- Owner Owner
-}
-
-// List returns information about objects in an S3 bucket.
-//
-// The prefix parameter limits the response to keys that begin with the
-// specified prefix.
-//
-// The delim parameter causes the response to group all of the keys that
-// share a common prefix up to the next delimiter in a single entry within
-// the CommonPrefixes field. You can use delimiters to separate a bucket
-// into different groupings of keys, similar to how folders would work.
-//
-// The marker parameter specifies the key to start with when listing objects
-// in a bucket. Amazon S3 lists objects in alphabetical order and
-// will return keys alphabetically greater than the marker.
-//
-// The max parameter specifies how many keys + common prefixes to return in
-// the response. The default is 1000.
-//
-// For example, given these keys in a bucket:
-//
-// index.html
-// index2.html
-// photos/2006/January/sample.jpg
-// photos/2006/February/sample2.jpg
-// photos/2006/February/sample3.jpg
-// photos/2006/February/sample4.jpg
-//
-// Listing this bucket with delimiter set to "/" would yield the
-// following result:
-//
-// &ListResp{
-// Name: "sample-bucket",
-// MaxKeys: 1000,
-// Delimiter: "/",
-// Contents: []Key{
-// {Key: "index.html", "index2.html"},
-// },
-// CommonPrefixes: []string{
-// "photos/",
-// },
-// }
-//
-// Listing the same bucket with delimiter set to "/" and prefix set to
-// "photos/2006/" would yield the following result:
-//
-// &ListResp{
-// Name: "sample-bucket",
-// MaxKeys: 1000,
-// Delimiter: "/",
-// Prefix: "photos/2006/",
-// CommonPrefixes: []string{
-// "photos/2006/February/",
-// "photos/2006/January/",
-// },
-// }
-//
-// See http://goo.gl/YjQTc for details.
-func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
- params := map[string][]string{
- "prefix": {prefix},
- "delimiter": {delim},
- "marker": {marker},
- }
- if max != 0 {
- params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
- }
- req := &request{
- bucket: b.Name,
- params: params,
- }
- result = &ListResp{}
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, result)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return result, nil
-}
-
-// The VersionsResp type holds the results of a list bucket Versions operation.
-type VersionsResp struct {
- Name string
- Prefix string
- KeyMarker string
- VersionIdMarker string
- MaxKeys int
- Delimiter string
- IsTruncated bool
- Versions []Version
- CommonPrefixes []string `xml:">Prefix"`
-}
-
-// The Version type represents an object version stored in an S3 bucket.
-type Version struct {
- Key string
- VersionId string
- IsLatest bool
- LastModified string
- // ETag gives the hex-encoded MD5 sum of the contents,
- // surrounded with double-quotes.
- ETag string
- Size int64
- Owner Owner
- StorageClass string
-}
-
-func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
- params := map[string][]string{
- "versions": {""},
- "prefix": {prefix},
- "delimiter": {delim},
- }
-
- if len(versionIdMarker) != 0 {
- params["version-id-marker"] = []string{versionIdMarker}
- }
- if len(keyMarker) != 0 {
- params["key-marker"] = []string{keyMarker}
- }
-
- if max != 0 {
- params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
- }
- req := &request{
- bucket: b.Name,
- params: params,
- }
- result = &VersionsResp{}
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, result)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return result, nil
-}
-
-// GetBucketContents returns a mapping of all key names in this bucket to Key objects.
-func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
- bucket_contents := map[string]Key{}
- prefix := ""
- path_separator := ""
- marker := ""
- for {
- contents, err := b.List(prefix, path_separator, marker, 1000)
- if err != nil {
- return &bucket_contents, err
- }
- for _, key := range contents.Contents {
- bucket_contents[key.Key] = key
- }
- if contents.IsTruncated {
- marker = contents.NextMarker
- } else {
- break
- }
- }
-
- return &bucket_contents, nil
-}
-
-// URL returns a non-signed URL that allows retrieving the
-// object at path. It only works if the object is publicly
-// readable (see SignedURL).
-func (b *Bucket) URL(path string) string {
- req := &request{
- bucket: b.Name,
- path: path,
- }
- err := b.S3.prepare(req)
- if err != nil {
- panic(err)
- }
- u, err := req.url()
- if err != nil {
- panic(err)
- }
- u.RawQuery = ""
- return u.String()
-}
-
-// SignedURL returns a signed URL that allows anyone holding the URL
-// to retrieve the object at path. The signature is valid until expires.
-func (b *Bucket) SignedURL(path string, expires time.Time) string {
- req := &request{
- bucket: b.Name,
- path: path,
- params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}},
- }
- err := b.S3.prepare(req)
- if err != nil {
- panic(err)
- }
- u, err := req.url()
- if err != nil {
- panic(err)
- }
- if b.S3.Auth.Token() != "" {
- return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
- } else {
- return u.String()
- }
-}
-
-// UploadSignedURL returns a signed URL that allows anyone holding the URL
-// to upload the object at path. The signature is valid until expires.
-// content_type is a string like image/png.
-// path is the resource name in S3 terminology, like images/ali.png
-// (excluding the bucket name itself).
-func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string {
- expire_date := expires.Unix()
- if method != "POST" {
- method = "PUT"
- }
- stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n/" + b.Name + "/" + path
- fmt.Println("String to sign:\n", stringToSign)
- a := b.S3.Auth
- secretKey := a.SecretKey
- accessId := a.AccessKey
- mac := hmac.New(sha1.New, []byte(secretKey))
- mac.Write([]byte(stringToSign))
- macsum := mac.Sum(nil)
-	signature := base64.StdEncoding.EncodeToString(macsum)
- signature = strings.TrimSpace(signature)
-
- signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/")
- if err != nil {
- log.Println("ERROR sining url for S3 upload", err)
- return ""
- }
- signedurl.Path += path
- params := url.Values{}
- params.Add("AWSAccessKeyId", accessId)
- params.Add("Expires", strconv.FormatInt(expire_date, 10))
- params.Add("Signature", signature)
- if a.Token() != "" {
- params.Add("token", a.Token())
- }
-
- signedurl.RawQuery = params.Encode()
- return signedurl.String()
-}
-
-// PostFormArgs returns the action and input fields needed to allow anonymous
-// uploads to a bucket within the expiration limit
-func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
- conditions := make([]string, 0)
- fields = map[string]string{
- "AWSAccessKeyId": b.Auth.AccessKey,
- "key": path,
- }
-
- conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
- conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
- if redirect != "" {
- conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
- fields["success_action_redirect"] = redirect
- }
-
- vExpiration := expires.Format("2006-01-02T15:04:05Z")
- vConditions := strings.Join(conditions, ",")
- policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
- policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
- fields["policy"] = policy64
-
- signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey))
- signer.Write([]byte(policy64))
- fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
-
- action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name)
- return
-}
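Both UploadSignedURL and PostFormArgs bottom out in the same V2 signing primitive: base64-encode an HMAC-SHA1, keyed with the secret key, over a payload (the string-to-sign, or the base64 policy). A standalone sketch with placeholder key and payload:

    package main

    import (
        "crypto/hmac"
        "crypto/sha1"
        "encoding/base64"
        "fmt"
    )

    // signV2 reproduces the primitive used above: HMAC-SHA1 keyed with the
    // secret key, base64-encoded. The key and payload below are placeholders.
    func signV2(secret, payload string) string {
        mac := hmac.New(sha1.New, []byte(secret))
        mac.Write([]byte(payload))
        return base64.StdEncoding.EncodeToString(mac.Sum(nil))
    }

    func main() {
        stringToSign := "PUT\n\nimage/png\n1893456000\n/sample/images/ali.png"
        fmt.Println(signV2("SECRET", stringToSign))
    }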
-
-type request struct {
- method string
- bucket string
- path string
- signpath string
- params url.Values
- headers http.Header
- baseurl string
- payload io.Reader
- prepared bool
-}
-
-func (req *request) url() (*url.URL, error) {
- u, err := url.Parse(req.baseurl)
- if err != nil {
- return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
- }
- u.RawQuery = req.params.Encode()
- u.Path = req.path
- return u, nil
-}
-
-// query prepares and runs the req request.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) query(req *request, resp interface{}) error {
- err := s3.prepare(req)
- if err == nil {
- var httpResponse *http.Response
- httpResponse, err = s3.run(req, resp)
- if resp == nil && httpResponse != nil {
- httpResponse.Body.Close()
- }
- }
- return err
-}
-
-// prepare sets up req to be delivered to S3.
-func (s3 *S3) prepare(req *request) error {
- var signpath = req.path
-
- if !req.prepared {
- req.prepared = true
- if req.method == "" {
- req.method = "GET"
- }
- // Copy so they can be mutated without affecting retries.
- params := make(url.Values)
- headers := make(http.Header)
- for k, v := range req.params {
- params[k] = v
- }
- for k, v := range req.headers {
- headers[k] = v
- }
- req.params = params
- req.headers = headers
- if !strings.HasPrefix(req.path, "/") {
- req.path = "/" + req.path
- }
- signpath = req.path
- if req.bucket != "" {
- req.baseurl = s3.Region.S3BucketEndpoint
- if req.baseurl == "" {
- // Use the path method to address the bucket.
- req.baseurl = s3.Region.S3Endpoint
- req.path = "/" + req.bucket + req.path
- } else {
- // Just in case, prevent injection.
- if strings.IndexAny(req.bucket, "/:@") >= 0 {
- return fmt.Errorf("bad S3 bucket: %q", req.bucket)
- }
- req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
- }
- signpath = "/" + req.bucket + signpath
- }
- }
-
- // Always sign again as it's not clear how far the
- // server has handled a previous attempt.
- u, err := url.Parse(req.baseurl)
- if err != nil {
- return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
- }
- reqSignpathSpaceFix := (&url.URL{Path: signpath}).String()
- req.headers["Host"] = []string{u.Host}
- req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
- if s3.Auth.Token() != "" {
- req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
- }
- sign(s3.Auth, req.method, reqSignpathSpaceFix, req.params, req.headers)
- return nil
-}
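
The two addressing modes prepare supports are visible through Bucket.URL, which runs the same preparation logic without sending a request. A sketch with placeholder credentials:

package main

import (
	"fmt"

	"github.com/goamz/goamz/aws"
	"github.com/goamz/goamz/s3"
)

func main() {
	auth := aws.Auth{AccessKey: "abc", SecretKey: "123"} // placeholder credentials

	// Path style: the bucket is prepended to the request path.
	pathStyle := aws.USEast

	// Virtual-host style: the ${bucket} placeholder moves into the host name.
	hostStyle := aws.USEast
	hostStyle.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"

	fmt.Println(s3.New(auth, pathStyle).Bucket("example").URL("key")) // .../example/key
	fmt.Println(s3.New(auth, hostStyle).Bucket("example").URL("key")) // example.s3.amazonaws.com/key
}
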
-
-// run sends req and returns the http response from the server.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
- if debug {
- log.Printf("Running S3 request: %#v", req)
- }
-
- u, err := req.url()
- if err != nil {
- return nil, err
- }
-
- hreq := http.Request{
- URL: u,
- Method: req.method,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Close: true,
- Header: req.headers,
- }
-
- if v, ok := req.headers["Content-Length"]; ok {
- hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
- delete(req.headers, "Content-Length")
- if hreq.ContentLength == 0 {
- req.payload = nil
- }
- }
- if req.payload != nil {
- hreq.Body = ioutil.NopCloser(req.payload)
- }
-
- if s3.client == nil {
- s3.client = &http.Client{
- Transport: &http.Transport{
- Dial: func(netw, addr string) (c net.Conn, err error) {
- c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
- if err != nil {
- return
- }
-
- var deadline time.Time
- if s3.RequestTimeout > 0 {
- deadline = time.Now().Add(s3.RequestTimeout)
- c.SetDeadline(deadline)
- }
-
- if s3.ReadTimeout > 0 || s3.WriteTimeout > 0 {
- c = &ioTimeoutConn{
- TCPConn: c.(*net.TCPConn),
- readTimeout: s3.ReadTimeout,
- writeTimeout: s3.WriteTimeout,
- requestDeadline: deadline,
- }
- }
- return
- },
- },
- }
- }
-
- hresp, err := s3.client.Do(&hreq)
- if err != nil {
- return nil, err
- }
- if debug {
- dump, _ := httputil.DumpResponse(hresp, true)
- log.Printf("} -> %s\n", dump)
- }
- if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
- defer hresp.Body.Close()
- return nil, buildError(hresp)
- }
- if resp != nil {
- err = xml.NewDecoder(hresp.Body).Decode(resp)
- hresp.Body.Close()
- if debug {
- log.Printf("goamz.s3> decoded xml into %#v", resp)
- }
- }
- return hresp, err
-}
-
-// Error represents an error in an operation with S3.
-type Error struct {
- StatusCode int // HTTP status code (200, 403, ...)
- Code string // S3 error code ("NoSuchBucket", "AccessDenied", ...)
- Message string // The human-oriented error message
- BucketName string
- RequestId string
- HostId string
-}
-
-func (e *Error) Error() string {
- return e.Message
-}
-
-func buildError(r *http.Response) error {
- if debug {
- log.Printf("got error (status code %v)", r.StatusCode)
- data, err := ioutil.ReadAll(r.Body)
- if err != nil {
- log.Printf("\tread error: %v", err)
- } else {
- log.Printf("\tdata:\n%s\n\n", data)
- }
- r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
- }
-
- err := Error{}
- // TODO return error if Unmarshal fails?
- xml.NewDecoder(r.Body).Decode(&err)
- r.Body.Close()
- err.StatusCode = r.StatusCode
- if err.Message == "" {
- err.Message = r.Status
- }
- if debug {
- log.Printf("err: %#v\n", err)
- }
- return &err
-}
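
Callers can distinguish S3-level faults from transport failures by type-asserting to *s3.Error, as the tests below do. A sketch with a hypothetical bucket and key:

package main

import (
	"fmt"
	"log"

	"github.com/goamz/goamz/aws"
	"github.com/goamz/goamz/s3"
)

// getOrNil treats a missing key or bucket as absence rather than failure,
// while still surfacing transport errors and other S3 faults.
func getOrNil(b *s3.Bucket, key string) ([]byte, error) {
	data, err := b.Get(key)
	if s3err, ok := err.(*s3.Error); ok && s3err.StatusCode == 404 {
		return nil, nil // NoSuchKey / NoSuchBucket
	}
	return data, err
}

func main() {
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	b := s3.New(auth, aws.USEast).Bucket("example-bucket") // hypothetical bucket
	data, err := getOrNil(b, "maybe-missing")
	fmt.Println(len(data), err)
}
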
-
-func shouldRetry(err error) bool {
- if err == nil {
- return false
- }
- if e, ok := err.(*url.Error); ok {
- // Transport returns this string if it detects a write on a connection which
- // has already had an error
- if e.Err.Error() == "http: can't write HTTP request on broken connection" {
- return true
- }
- err = e.Err
- }
-
- switch err {
- case io.ErrUnexpectedEOF, io.EOF:
- return true
- }
- switch e := err.(type) {
- case *net.DNSError:
- return true
- case *net.OpError:
- switch e.Op {
- case "read", "write", "WSARecv", "WSASend", "ConnectEx":
- return true
- }
- case *Error:
- switch e.Code {
- case "InternalError", "NoSuchUpload", "NoSuchBucket", "RequestTimeout":
- return true
- }
- // Handle TLS handshake timeout issues and similar temporary errors.
- case net.Error:
- return e.Temporary()
- }
-
- return false
-}
-
-func hasCode(err error, code string) bool {
- s3err, ok := err.(*Error)
- return ok && s3err.Code == code
-}
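
shouldRetry is unexported, but a caller-side analogue pairs aws.AttemptStrategy with a similar transient-error check. The predicate below only approximates the internal one; it is a sketch, not a reproduction:

package main

import (
	"io"
	"log"
	"net"
	"time"

	"github.com/goamz/goamz/aws"
	"github.com/goamz/goamz/s3"
)

// transient loosely mirrors the package's unexported shouldRetry.
func transient(err error) bool {
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return true
	}
	if ne, ok := err.(net.Error); ok {
		return ne.Temporary()
	}
	if s3err, ok := err.(*s3.Error); ok {
		return s3err.Code == "InternalError" || s3err.Code == "RequestTimeout"
	}
	return false
}

func main() {
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	b := s3.New(auth, aws.USEast).Bucket("example-bucket") // hypothetical bucket
	strategy := aws.AttemptStrategy{Min: 3, Total: 10 * time.Second, Delay: 200 * time.Millisecond}

	for attempt := strategy.Start(); attempt.Next(); {
		err = b.Put("key", []byte("value"), "text/plain", s3.Private, s3.Options{})
		if err == nil || !transient(err) {
			break
		}
	}
	if err != nil {
		log.Fatal(err)
	}
}
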
-
-// ioTimeoutConn is a net.Conn which sets a deadline for each Read or Write operation
-type ioTimeoutConn struct {
- *net.TCPConn
- readTimeout time.Duration
- writeTimeout time.Duration
- requestDeadline time.Time
-}
-
-func (c *ioTimeoutConn) deadline(timeout time.Duration) time.Time {
- dl := time.Now().Add(timeout)
- if c.requestDeadline.IsZero() || dl.Before(c.requestDeadline) {
- return dl
- }
-
- return c.requestDeadline
-}
-
-func (c *ioTimeoutConn) Read(b []byte) (int, error) {
- if c.readTimeout > 0 {
- err := c.TCPConn.SetReadDeadline(c.deadline(c.readTimeout))
- if err != nil {
- return 0, err
- }
- }
- return c.TCPConn.Read(b)
-}
-
-func (c *ioTimeoutConn) Write(b []byte) (int, error) {
- if c.writeTimeout > 0 {
- err := c.TCPConn.SetWriteDeadline(c.deadline(c.writeTimeout))
- if err != nil {
- return 0, err
- }
- }
- return c.TCPConn.Write(b)
-}
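
On every Read and Write, the deadline helper above picks whichever expires first: the per-operation timeout or the overall request deadline. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"time"
)

func nextDeadline(opTimeout time.Duration, requestDeadline time.Time) time.Time {
	dl := time.Now().Add(opTimeout)
	if requestDeadline.IsZero() || dl.Before(requestDeadline) {
		return dl // per-operation timeout wins (or no overall deadline is set)
	}
	return requestDeadline // the overall request deadline is closer
}

func main() {
	overall := time.Now().Add(2 * time.Second)
	fmt.Println(nextDeadline(5*time.Second, overall)) // clipped to the overall deadline
	fmt.Println(nextDeadline(1*time.Second, overall)) // per-operation timeout applies
}
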
diff --git a/vendor/github.com/goamz/goamz/s3/s3_test.go b/vendor/github.com/goamz/goamz/s3/s3_test.go
deleted file mode 100644
index 24d4dfcc0..000000000
--- a/vendor/github.com/goamz/goamz/s3/s3_test.go
+++ /dev/null
@@ -1,427 +0,0 @@
-package s3_test
-
-import (
- "bytes"
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- "github.com/goamz/goamz/aws"
- "github.com/goamz/goamz/s3"
- "github.com/goamz/goamz/testutil"
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) {
- TestingT(t)
-}
-
-type S struct {
- s3 *s3.S3
-}
-
-var _ = Suite(&S{})
-
-var testServer = testutil.NewHTTPServer()
-
-func (s *S) SetUpSuite(c *C) {
- testServer.Start()
- auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
- s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
-}
-
-func (s *S) TearDownSuite(c *C) {
- s.s3.AttemptStrategy = s3.DefaultAttemptStrategy
-}
-
-func (s *S) SetUpTest(c *C) {
- s.s3.AttemptStrategy = aws.AttemptStrategy{
- Total: 300 * time.Millisecond,
- Delay: 100 * time.Millisecond,
- }
-}
-
-func (s *S) TearDownTest(c *C) {
- testServer.Flush()
-}
-
-func (s *S) DisableRetries() {
- s.s3.AttemptStrategy = aws.AttemptStrategy{}
-}
-
-// PutBucket docs: http://goo.gl/kBTCu
-
-func (s *S) TestPutBucket(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- err := b.PutBucket(s3.Private)
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/bucket/")
- c.Assert(req.Header["Date"], Not(Equals), "")
-}
-
-// Head docs: http://bit.ly/17K1ylI
-
-func (s *S) TestHead(c *C) {
- testServer.Response(200, nil, "content")
-
- b := s.s3.Bucket("bucket")
- resp, err := b.Head("name", nil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "HEAD")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(Equals), "")
-
- c.Assert(err, IsNil)
- c.Assert(resp.ContentLength, FitsTypeOf, int64(0))
- c.Assert(resp, FitsTypeOf, &http.Response{})
-}
-
-// DeleteBucket docs: http://goo.gl/GoBrY
-
-func (s *S) TestDelBucket(c *C) {
- testServer.Response(204, nil, "")
-
- b := s.s3.Bucket("bucket")
- err := b.DelBucket()
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "DELETE")
- c.Assert(req.URL.Path, Equals, "/bucket/")
- c.Assert(req.Header["Date"], Not(Equals), "")
-}
-
-// GetObject docs: http://goo.gl/isCO7
-
-func (s *S) TestGet(c *C) {
- testServer.Response(200, nil, "content")
-
- b := s.s3.Bucket("bucket")
- data, err := b.Get("name")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(Equals), "")
-
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "content")
-}
-
-func (s *S) TestURL(c *C) {
- testServer.Response(200, nil, "content")
-
- b := s.s3.Bucket("bucket")
- url := b.URL("name")
- r, err := http.Get(url)
- c.Assert(err, IsNil)
- data, err := ioutil.ReadAll(r.Body)
- r.Body.Close()
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "content")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
-}
-
-func (s *S) TestGetReader(c *C) {
- testServer.Response(200, nil, "content")
-
- b := s.s3.Bucket("bucket")
- rc, err := b.GetReader("name")
- c.Assert(err, IsNil)
- data, err := ioutil.ReadAll(rc)
- rc.Close()
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "content")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(Equals), "")
-}
-
-func (s *S) TestGetNotFound(c *C) {
- for i := 0; i < 10; i++ {
- testServer.Response(404, nil, GetObjectErrorDump)
- }
-
- b := s.s3.Bucket("non-existent-bucket")
- data, err := b.Get("non-existent")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent")
- c.Assert(req.Header["Date"], Not(Equals), "")
-
- s3err, _ := err.(*s3.Error)
- c.Assert(s3err, NotNil)
- c.Assert(s3err.StatusCode, Equals, 404)
- c.Assert(s3err.BucketName, Equals, "non-existent-bucket")
- c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8")
- c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
- c.Assert(s3err.Code, Equals, "NoSuchBucket")
- c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
- c.Assert(s3err.Error(), Equals, "The specified bucket does not exist")
- c.Assert(data, IsNil)
-}
-
-// PutObject docs: http://goo.gl/FEBPD
-
-func (s *S) TestPutObject(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
- c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
- //c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
- c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
-}
-
-func (s *S) TestPutObjectReadTimeout(c *C) {
- s.s3.ReadTimeout = 50 * time.Millisecond
- defer func() {
- s.s3.ReadTimeout = 0
- }()
-
- b := s.s3.Bucket("bucket")
- err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
-
- // Make sure that we get a timeout error.
- c.Assert(err, NotNil)
-
- // Set the response after the request times out so that the next request will work.
- testServer.Response(200, nil, "")
-
- // This time set the response within our timeout period so that we expect the call
- // to return successfully.
- go func() {
- time.Sleep(25 * time.Millisecond)
- testServer.Response(200, nil, "")
- }()
- err = b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
- c.Assert(err, IsNil)
-}
-
-func (s *S) TestPutObjectHeader(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- err := b.PutHeader(
- "name",
- []byte("content"),
- map[string][]string{"Content-Type": {"content-type"}},
- s3.Private,
- )
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
- c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
- //c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
- c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
-}
-
-func (s *S) TestPutReader(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- buf := bytes.NewBufferString("content")
- err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private, s3.Options{})
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
- c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
- //c.Assert(req.Header["Content-MD5"], Equals, "...")
- c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
-}
-
-func (s *S) TestPutReaderHeader(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- buf := bytes.NewBufferString("content")
- err := b.PutReaderHeader(
- "name",
- buf,
- int64(buf.Len()),
- map[string][]string{"Content-Type": {"content-type"}},
- s3.Private,
- )
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "PUT")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
- c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
- c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
- //c.Assert(req.Header["Content-MD5"], Equals, "...")
- c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
-}
-
-// DelObject docs: http://goo.gl/APeTt
-
-func (s *S) TestDelObject(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- err := b.Del("name")
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "DELETE")
- c.Assert(req.URL.Path, Equals, "/bucket/name")
- c.Assert(req.Header["Date"], Not(Equals), "")
-}
-
-func (s *S) TestDelMultiObjects(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- objects := []s3.Object{{Key: "test"}}
- err := b.DelMulti(s3.Delete{
- Quiet: false,
- Objects: objects,
- })
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "POST")
- c.Assert(req.URL.RawQuery, Equals, "delete=")
- c.Assert(req.Header["Date"], Not(Equals), "")
- c.Assert(req.Header["Content-MD5"], Not(Equals), "")
- c.Assert(req.Header["Content-Type"], Not(Equals), "")
- c.Assert(req.ContentLength, Not(Equals), "")
-}
-
-// Bucket List Objects docs: http://goo.gl/YjQTc
-
-func (s *S) TestList(c *C) {
- testServer.Response(200, nil, GetListResultDump1)
-
- b := s.s3.Bucket("quotes")
-
- data, err := b.List("N", "", "", 0)
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/quotes/")
- c.Assert(req.Header["Date"], Not(Equals), "")
- c.Assert(req.Form["prefix"], DeepEquals, []string{"N"})
- c.Assert(req.Form["delimiter"], DeepEquals, []string{""})
- c.Assert(req.Form["marker"], DeepEquals, []string{""})
- c.Assert(req.Form["max-keys"], DeepEquals, []string(nil))
-
- c.Assert(data.Name, Equals, "quotes")
- c.Assert(data.Prefix, Equals, "N")
- c.Assert(data.IsTruncated, Equals, false)
- c.Assert(len(data.Contents), Equals, 2)
-
- c.Assert(data.Contents[0].Key, Equals, "Nelson")
- c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z")
- c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
- c.Assert(data.Contents[0].Size, Equals, int64(5))
- c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD")
- c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f")
- c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile")
-
- c.Assert(data.Contents[1].Key, Equals, "Neo")
- c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z")
- c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
- c.Assert(data.Contents[1].Size, Equals, int64(4))
- c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD")
- c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f")
- c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile")
-}
-
-func (s *S) TestListWithDelimiter(c *C) {
- testServer.Response(200, nil, GetListResultDump2)
-
- b := s.s3.Bucket("quotes")
-
- data, err := b.List("photos/2006/", "/", "some-marker", 1000)
- c.Assert(err, IsNil)
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, Equals, "GET")
- c.Assert(req.URL.Path, Equals, "/quotes/")
- c.Assert(req.Header["Date"], Not(Equals), "")
- c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"})
- c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
- c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"})
- c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"})
-
- c.Assert(data.Name, Equals, "example-bucket")
- c.Assert(data.Prefix, Equals, "photos/2006/")
- c.Assert(data.Delimiter, Equals, "/")
- c.Assert(data.Marker, Equals, "some-marker")
- c.Assert(data.IsTruncated, Equals, false)
- c.Assert(len(data.Contents), Equals, 0)
- c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
-}
-
-func (s *S) TestExists(c *C) {
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("bucket")
- result, err := b.Exists("name")
-
- req := testServer.WaitRequest()
-
- c.Assert(req.Method, Equals, "HEAD")
-
- c.Assert(err, IsNil)
- c.Assert(result, Equals, true)
-}
-
-func (s *S) TestExistsNotFound404(c *C) {
- testServer.Response(404, nil, "")
-
- b := s.s3.Bucket("bucket")
- result, err := b.Exists("name")
-
- req := testServer.WaitRequest()
-
- c.Assert(req.Method, Equals, "HEAD")
-
- c.Assert(err, IsNil)
- c.Assert(result, Equals, false)
-}
-
-func (s *S) TestExistsNotFound403(c *C) {
- testServer.Response(403, nil, "")
-
- b := s.s3.Bucket("bucket")
- result, err := b.Exists("name")
-
- req := testServer.WaitRequest()
-
- c.Assert(req.Method, Equals, "HEAD")
-
- c.Assert(err, IsNil)
- c.Assert(result, Equals, false)
-}
diff --git a/vendor/github.com/goamz/goamz/s3/s3i_test.go b/vendor/github.com/goamz/goamz/s3/s3i_test.go
deleted file mode 100644
index 1b898efc4..000000000
--- a/vendor/github.com/goamz/goamz/s3/s3i_test.go
+++ /dev/null
@@ -1,590 +0,0 @@
-package s3_test
-
-import (
- "bytes"
- "crypto/md5"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "sort"
- "strings"
- "time"
-
- "github.com/goamz/goamz/aws"
- "github.com/goamz/goamz/s3"
- "github.com/goamz/goamz/testutil"
- . "gopkg.in/check.v1"
-)
-
-// AmazonServer represents an Amazon S3 server.
-type AmazonServer struct {
- auth aws.Auth
-}
-
-func (s *AmazonServer) SetUp(c *C) {
- auth, err := aws.EnvAuth()
- if err != nil {
- c.Fatal(err.Error())
- }
- s.auth = auth
-}
-
-var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
-var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
-var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})
-
-// AmazonClientSuite tests the client against a live S3 server.
-type AmazonClientSuite struct {
- aws.Region
- srv AmazonServer
- ClientTests
-}
-
-func (s *AmazonClientSuite) SetUpSuite(c *C) {
- if !testutil.Amazon {
- c.Skip("live tests against AWS disabled (no -amazon)")
- }
- s.srv.SetUp(c)
- s.s3 = s3.New(s.srv.auth, s.Region)
- // In case tests were interrupted in the middle before.
- s.ClientTests.Cleanup()
-}
-
-func (s *AmazonClientSuite) TearDownTest(c *C) {
- s.ClientTests.Cleanup()
-}
-
-// AmazonDomainClientSuite tests the client against a live S3
-// server using bucket names in the endpoint domain name rather
-// than the request path.
-type AmazonDomainClientSuite struct {
- aws.Region
- srv AmazonServer
- ClientTests
-}
-
-func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
- if !testutil.Amazon {
- c.Skip("live tests against AWS disabled (no -amazon)")
- }
- s.srv.SetUp(c)
- region := s.Region
- region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
- s.s3 = s3.New(s.srv.auth, region)
- s.ClientTests.Cleanup()
-}
-
-func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
- s.ClientTests.Cleanup()
-}
-
-// ClientTests defines integration tests designed to test the client.
-// It is not used as a test suite in itself, but embedded within
-// another type.
-type ClientTests struct {
- s3 *s3.S3
- authIsBroken bool
-}
-
-func (s *ClientTests) Cleanup() {
- killBucket(testBucket(s.s3))
-}
-
-func testBucket(s *s3.S3) *s3.Bucket {
- // Watch out! If this function is changed to match something people
- // own, killBucket will happily remove *everything* inside the bucket.
- key := s.Auth.AccessKey
- if len(key) >= 8 {
- key = s.Auth.AccessKey[:8]
- }
- return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
-}
-
-var attempts = aws.AttemptStrategy{
- Min: 5,
- Total: 20 * time.Second,
- Delay: 100 * time.Millisecond,
-}
-
-func killBucket(b *s3.Bucket) {
- var err error
- for attempt := attempts.Start(); attempt.Next(); {
- err = b.DelBucket()
- if err == nil {
- return
- }
- if _, ok := err.(*net.DNSError); ok {
- return
- }
- e, ok := err.(*s3.Error)
- if ok && e.Code == "NoSuchBucket" {
- return
- }
- if ok && e.Code == "BucketNotEmpty" {
- // Errors are ignored here. Just retry.
- resp, err := b.List("", "", "", 1000)
- if err == nil {
- for _, key := range resp.Contents {
- _ = b.Del(key.Key)
- }
- }
- multis, _, _ := b.ListMulti("", "")
- for _, m := range multis {
- _ = m.Abort()
- }
- }
- }
- message := "cannot delete test bucket"
- if err != nil {
- message += ": " + err.Error()
- }
- panic(message)
-}
-
-func get(url string) ([]byte, error) {
- for attempt := attempts.Start(); attempt.Next(); {
- resp, err := http.Get(url)
- if err != nil {
- if attempt.HasNext() {
- continue
- }
- return nil, err
- }
- data, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- if attempt.HasNext() {
- continue
- }
- return nil, err
- }
- return data, err
- }
- panic("unreachable")
-}
-
-func (s *ClientTests) TestBasicFunctionality(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.PublicRead)
- c.Assert(err, IsNil)
-
- err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead, s3.Options{})
- c.Assert(err, IsNil)
- defer b.Del("name")
-
- data, err := b.Get("name")
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "yo!")
-
- data, err = get(b.URL("name"))
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "yo!")
-
- buf := bytes.NewBufferString("hey!")
- err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private, s3.Options{})
- c.Assert(err, IsNil)
- defer b.Del("name2")
-
- rc, err := b.GetReader("name2")
- c.Assert(err, IsNil)
- data, err = ioutil.ReadAll(rc)
- c.Check(err, IsNil)
- c.Check(string(data), Equals, "hey!")
- rc.Close()
-
- data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "hey!")
-
- if !s.authIsBroken {
- data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
- c.Assert(err, IsNil)
- c.Assert(string(data), Matches, "(?s).*AccessDenied.*")
- }
-
- err = b.DelBucket()
- c.Assert(err, NotNil)
-
- s3err, ok := err.(*s3.Error)
- c.Assert(ok, Equals, true)
- c.Assert(s3err.Code, Equals, "BucketNotEmpty")
- c.Assert(s3err.BucketName, Equals, b.Name)
- c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty")
-
- err = b.Del("name")
- c.Assert(err, IsNil)
- err = b.Del("name2")
- c.Assert(err, IsNil)
-
- err = b.DelBucket()
- c.Assert(err, IsNil)
-}
-
-func (s *ClientTests) TestGetNotFound(c *C) {
- b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
- data, err := b.Get("non-existent")
-
- s3err, _ := err.(*s3.Error)
- c.Assert(s3err, NotNil)
- c.Assert(s3err.StatusCode, Equals, 404)
- c.Assert(s3err.Code, Equals, "NoSuchBucket")
- c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
- c.Assert(data, IsNil)
-}
-
-// Communicate with all endpoints to see if they are alive.
-func (s *ClientTests) TestRegions(c *C) {
- errs := make(chan error, len(aws.Regions))
- for _, region := range aws.Regions {
- go func(r aws.Region) {
- s := s3.New(s.s3.Auth, r)
- b := s.Bucket("goamz-" + s.Auth.AccessKey)
- _, err := b.Get("non-existent")
- errs <- err
- }(region)
- }
- for range aws.Regions {
- err := <-errs
- if err != nil {
- s3err, ok := err.(*s3.Error)
- if ok {
- c.Check(s3err.Code, Matches, "NoSuchBucket")
- } else if _, ok = err.(*net.DNSError); ok {
- // Okay as well.
- } else {
- c.Errorf("Non-S3 error: %s", err)
- }
- } else {
- c.Errorf("Test should have errored but it seems to have succeeded")
- }
- }
-}
-
-var objectNames = []string{
- "index.html",
- "index2.html",
- "photos/2006/February/sample2.jpg",
- "photos/2006/February/sample3.jpg",
- "photos/2006/February/sample4.jpg",
- "photos/2006/January/sample.jpg",
- "test/bar",
- "test/foo",
-}
-
-func keys(names ...string) []s3.Key {
- ks := make([]s3.Key, len(names))
- for i, name := range names {
- ks[i].Key = name
- }
- return ks
-}
-
-// As the ListResp specifies all the parameters to the
-// request too, we use it to specify request parameters
-// and expected results. The Contents field is
-// used only for the key names inside it.
-var listTests = []s3.ListResp{
- // normal list.
- {
- Contents: keys(objectNames...),
- }, {
- Marker: objectNames[0],
- Contents: keys(objectNames[1:]...),
- }, {
- Marker: objectNames[0] + "a",
- Contents: keys(objectNames[1:]...),
- }, {
- Marker: "z",
- },
-
- // limited results.
- {
- MaxKeys: 2,
- Contents: keys(objectNames[0:2]...),
- IsTruncated: true,
- }, {
- MaxKeys: 2,
- Marker: objectNames[0],
- Contents: keys(objectNames[1:3]...),
- IsTruncated: true,
- }, {
- MaxKeys: 2,
- Marker: objectNames[len(objectNames)-2],
- Contents: keys(objectNames[len(objectNames)-1:]...),
- },
-
- // with delimiter
- {
- Delimiter: "/",
- CommonPrefixes: []string{"photos/", "test/"},
- Contents: keys("index.html", "index2.html"),
- }, {
- Delimiter: "/",
- Prefix: "photos/2006/",
- CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
- }, {
- Delimiter: "/",
- Prefix: "t",
- CommonPrefixes: []string{"test/"},
- }, {
- Delimiter: "/",
- MaxKeys: 1,
- Contents: keys("index.html"),
- IsTruncated: true,
- }, {
- Delimiter: "/",
- MaxKeys: 1,
- Marker: "index2.html",
- CommonPrefixes: []string{"photos/"},
- IsTruncated: true,
- }, {
- Delimiter: "/",
- MaxKeys: 1,
- Marker: "photos/",
- CommonPrefixes: []string{"test/"},
- IsTruncated: false,
- }, {
- Delimiter: "Feb",
- CommonPrefixes: []string{"photos/2006/Feb"},
- Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
- },
-}
-
-func (s *ClientTests) TestDoublePutBucket(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.PublicRead)
- c.Assert(err, IsNil)
-
- err = b.PutBucket(s3.PublicRead)
- if err != nil {
- c.Assert(err, FitsTypeOf, new(s3.Error))
- c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou")
- }
-}
-
-func (s *ClientTests) TestBucketList(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.Private)
- c.Assert(err, IsNil)
-
- objData := make(map[string][]byte)
- for i, path := range objectNames {
- data := []byte(strings.Repeat("a", i))
- err := b.Put(path, data, "text/plain", s3.Private, s3.Options{})
- c.Assert(err, IsNil)
- defer b.Del(path)
- objData[path] = data
- }
-
- for i, t := range listTests {
- c.Logf("test %d", i)
- resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
- c.Assert(err, IsNil)
- c.Check(resp.Name, Equals, b.Name)
- c.Check(resp.Delimiter, Equals, t.Delimiter)
- c.Check(resp.IsTruncated, Equals, t.IsTruncated)
- c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes)
- checkContents(c, resp.Contents, objData, t.Contents)
- }
-}
-
-func etag(data []byte) string {
- sum := md5.New()
- sum.Write(data)
- return fmt.Sprintf(`"%x"`, sum.Sum(nil))
-}
-
-func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
- c.Assert(contents, HasLen, len(expected))
- for i, k := range contents {
- c.Check(k.Key, Equals, expected[i].Key)
- // TODO mtime
- c.Check(k.Size, Equals, int64(len(data[k.Key])))
- c.Check(k.ETag, Equals, etag(data[k.Key]))
- }
-}
-
-func (s *ClientTests) TestMultiInitPutList(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.Private)
- c.Assert(err, IsNil)
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
- c.Assert(multi.UploadId, Matches, ".+")
- defer multi.Abort()
-
- var sent []s3.Part
-
- for i := 0; i < 5; i++ {
- p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
- c.Assert(err, IsNil)
- c.Assert(p.N, Equals, i+1)
- c.Assert(p.Size, Equals, int64(8))
- c.Assert(p.ETag, Matches, ".+")
- sent = append(sent, p)
- }
-
- s3.SetListPartsMax(2)
-
- parts, err := multi.ListParts()
- c.Assert(err, IsNil)
- c.Assert(parts, HasLen, len(sent))
- for i := range parts {
- c.Assert(parts[i].N, Equals, sent[i].N)
- c.Assert(parts[i].Size, Equals, sent[i].Size)
- c.Assert(parts[i].ETag, Equals, sent[i].ETag)
- }
-
- err = multi.Complete(parts)
- s3err, failed := err.(*s3.Error)
- c.Assert(failed, Equals, true)
- c.Assert(s3err.Code, Equals, "EntityTooSmall")
-
- err = multi.Abort()
- c.Assert(err, IsNil)
- _, err = multi.ListParts()
- s3err, ok := err.(*s3.Error)
- c.Assert(ok, Equals, true)
- c.Assert(s3err.Code, Equals, "NoSuchUpload")
-}
-
-// This may take a minute or more due to the minimum part size S3
-// accepts for multipart uploads.
-func (s *ClientTests) TestMultiComplete(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.Private)
- c.Assert(err, IsNil)
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
- c.Assert(multi.UploadId, Matches, ".+")
- defer multi.Abort()
-
- // Minimum size S3 accepts for all but the last part is 5MB.
- data1 := make([]byte, 5*1024*1024)
- data2 := []byte("<part 2>")
-
- part1, err := multi.PutPart(1, bytes.NewReader(data1))
- c.Assert(err, IsNil)
- part2, err := multi.PutPart(2, bytes.NewReader(data2))
- c.Assert(err, IsNil)
-
- // Purposefully reversed. The order requirement must be handled.
- err = multi.Complete([]s3.Part{part2, part1})
- c.Assert(err, IsNil)
-
- data, err := b.Get("multi")
- c.Assert(err, IsNil)
-
- c.Assert(len(data), Equals, len(data1)+len(data2))
- for i := range data1 {
- if data[i] != data1[i] {
- c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i])
- }
- }
- c.Assert(string(data[len(data1):]), Equals, string(data2))
-}
-
-type multiList []*s3.Multi
-
-func (l multiList) Len() int { return len(l) }
-func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
-func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-
-func (s *ClientTests) TestListMulti(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.Private)
- c.Assert(err, IsNil)
-
- // Ensure an empty state before testing its behavior.
- multis, _, err := b.ListMulti("", "")
- for _, m := range multis {
- err := m.Abort()
- c.Assert(err, IsNil)
- }
-
- keys := []string{
- "a/multi2",
- "a/multi3",
- "b/multi4",
- "multi1",
- }
- for _, key := range keys {
- m, err := b.InitMulti(key, "", s3.Private)
- c.Assert(err, IsNil)
- defer m.Abort()
- }
-
- // Amazon's implementation of the multiple-request listing for
- // multipart uploads in progress seems broken in multiple ways.
- // (next tokens are not provided, etc).
- //s3.SetListMultiMax(2)
-
- multis, prefixes, err := b.ListMulti("", "")
- c.Assert(err, IsNil)
- for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
- multis, prefixes, err = b.ListMulti("", "")
- c.Assert(err, IsNil)
- }
- sort.Sort(multiList(multis))
- c.Assert(prefixes, IsNil)
- var gotKeys []string
- for _, m := range multis {
- gotKeys = append(gotKeys, m.Key)
- }
- c.Assert(gotKeys, DeepEquals, keys)
- for _, m := range multis {
- c.Assert(m.Bucket, Equals, b)
- c.Assert(m.UploadId, Matches, ".+")
- }
-
- multis, prefixes, err = b.ListMulti("", "/")
- for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
- multis, prefixes, err = b.ListMulti("", "")
- c.Assert(err, IsNil)
- }
- c.Assert(err, IsNil)
- c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
- c.Assert(multis, HasLen, 1)
- c.Assert(multis[0].Bucket, Equals, b)
- c.Assert(multis[0].Key, Equals, "multi1")
- c.Assert(multis[0].UploadId, Matches, ".+")
-
- for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
- multis, prefixes, err = b.ListMulti("", "")
- c.Assert(err, IsNil)
- }
- multis, prefixes, err = b.ListMulti("a/", "/")
- c.Assert(err, IsNil)
- c.Assert(prefixes, IsNil)
- c.Assert(multis, HasLen, 2)
- c.Assert(multis[0].Bucket, Equals, b)
- c.Assert(multis[0].Key, Equals, "a/multi2")
- c.Assert(multis[0].UploadId, Matches, ".+")
- c.Assert(multis[1].Bucket, Equals, b)
- c.Assert(multis[1].Key, Equals, "a/multi3")
- c.Assert(multis[1].UploadId, Matches, ".+")
-}
-
-func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
- b := testBucket(s.s3)
- err := b.PutBucket(s3.Private)
- c.Assert(err, IsNil)
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private)
- c.Assert(err, IsNil)
- defer multi.Abort()
-
- // This tests an edge case. Amazon requires at least one
- // part for multipart uploads to work, even if the part is empty.
- parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
- c.Assert(err, IsNil)
- c.Assert(parts, HasLen, 1)
- c.Assert(parts[0].Size, Equals, int64(0))
- c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
-
- err = multi.Complete(parts)
- c.Assert(err, IsNil)
-}
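
Outside the test suite, the same exported multipart flow looks roughly like this; the bucket and file names are placeholders, and 5 MiB is S3's minimum size for every part but the last:

package main

import (
	"log"
	"os"

	"github.com/goamz/goamz/aws"
	"github.com/goamz/goamz/s3"
)

func main() {
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	b := s3.New(auth, aws.USEast).Bucket("example-bucket") // hypothetical bucket

	f, err := os.Open("backup.tar.gz") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	multi, err := b.InitMulti("backups/backup.tar.gz", "application/gzip", s3.Private)
	if err != nil {
		log.Fatal(err)
	}
	// PutAll splits the reader into parts of partSize bytes each.
	parts, err := multi.PutAll(f, 5*1024*1024)
	if err != nil {
		multi.Abort() // best effort; abandoned parts otherwise accrue charges
		log.Fatal(err)
	}
	if err := multi.Complete(parts); err != nil {
		multi.Abort()
		log.Fatal(err)
	}
}
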
diff --git a/vendor/github.com/goamz/goamz/s3/s3t_test.go b/vendor/github.com/goamz/goamz/s3/s3t_test.go
deleted file mode 100644
index 4e6f61fdb..000000000
--- a/vendor/github.com/goamz/goamz/s3/s3t_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package s3_test
-
-import (
- "github.com/goamz/goamz/aws"
- "github.com/goamz/goamz/s3"
- "github.com/goamz/goamz/s3/s3test"
- . "gopkg.in/check.v1"
-)
-
-type LocalServer struct {
- auth aws.Auth
- region aws.Region
- srv *s3test.Server
- config *s3test.Config
-}
-
-func (s *LocalServer) SetUp(c *C) {
- srv, err := s3test.NewServer(s.config)
- c.Assert(err, IsNil)
- c.Assert(srv, NotNil)
-
- s.srv = srv
- s.region = aws.Region{
- Name: "faux-region-1",
- S3Endpoint: srv.URL(),
- S3LocationConstraint: true, // s3test server requires a LocationConstraint
- }
-}
-
-// LocalServerSuite defines tests that will run
-// against the local s3test server. It includes
-// selected tests from ClientTests;
-// when the s3test functionality is sufficient, it should
-// include all of them, and ClientTests can be simply embedded.
-type LocalServerSuite struct {
- srv LocalServer
- clientTests ClientTests
-}
-
-var (
- // run tests twice, once in us-east-1 mode, once not.
- _ = Suite(&LocalServerSuite{
- srv: LocalServer{
- config: &s3test.Config{},
- },
- })
- _ = Suite(&LocalServerSuite{
- srv: LocalServer{
- config: &s3test.Config{
- Send409Conflict: true,
- },
- },
- })
-)
-
-func (s *LocalServerSuite) SetUpSuite(c *C) {
- s.srv.SetUp(c)
- s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
-
- // TODO Sadly the fake server ignores auth completely right now. :-(
- s.clientTests.authIsBroken = true
- s.clientTests.Cleanup()
-}
-
-func (s *LocalServerSuite) TearDownTest(c *C) {
- s.clientTests.Cleanup()
-}
-
-func (s *LocalServerSuite) TestBasicFunctionality(c *C) {
- s.clientTests.TestBasicFunctionality(c)
-}
-
-func (s *LocalServerSuite) TestGetNotFound(c *C) {
- s.clientTests.TestGetNotFound(c)
-}
-
-func (s *LocalServerSuite) TestBucketList(c *C) {
- s.clientTests.TestBucketList(c)
-}
-
-func (s *LocalServerSuite) TestDoublePutBucket(c *C) {
- s.clientTests.TestDoublePutBucket(c)
-}
diff --git a/vendor/github.com/goamz/goamz/s3/s3test/server.go b/vendor/github.com/goamz/goamz/s3/s3test/server.go
deleted file mode 100644
index 04808d3ad..000000000
--- a/vendor/github.com/goamz/goamz/s3/s3test/server.go
+++ /dev/null
@@ -1,642 +0,0 @@
-package s3test
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "encoding/hex"
- "encoding/xml"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/goamz/goamz/s3"
-)
-
-const debug = false
-
-type s3Error struct {
- statusCode int
- XMLName struct{} `xml:"Error"`
- Code string
- Message string
- BucketName string
- RequestId string
- HostId string
-}
-
-type action struct {
- srv *Server
- w http.ResponseWriter
- req *http.Request
- reqId string
-}
-
-// Config controls the internal behaviour of the Server. A nil config is the
-// default, and is equivalent to every option taking its default value. Once
-// passed to NewServer, the configuration must not be modified.
-type Config struct {
- // Send409Conflict controls how the Server will respond to calls to PUT on a
- // previously existing bucket. The default is false, and corresponds to the
- // us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
- // all other regions.
- // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
- Send409Conflict bool
- // Host is the address on which the s3test server will listen.
- Host string
-}
-
-func (c *Config) send409Conflict() bool {
- if c != nil {
- return c.Send409Conflict
- }
- return false
-}
-
-// Server is a fake S3 server for testing purposes.
-// All of the data for the server is kept in memory.
-type Server struct {
- url string
- reqId int
- listener net.Listener
- mu sync.Mutex
- buckets map[string]*bucket
- config *Config
-}
-
-type bucket struct {
- name string
- acl s3.ACL
- ctime time.Time
- objects map[string]*object
-}
-
-type object struct {
- name string
- mtime time.Time
- meta http.Header // metadata to return with requests.
- checksum []byte // also held as Content-MD5 in meta.
- data []byte
-}
-
-// A resource encapsulates the subject of an HTTP request.
-// The resource referred to may or may not exist
-// when the request is made.
-type resource interface {
- put(a *action) interface{}
- get(a *action) interface{}
- post(a *action) interface{}
- delete(a *action) interface{}
-}
-
-func NewServer(config *Config) (*Server, error) {
- host := "localhost:0"
- if config != nil && config.Host != "" {
- host = config.Host
- }
-
- l, err := net.Listen("tcp", host)
- if err != nil {
- return nil, fmt.Errorf("cannot listen on %s: %v", host, err)
- }
- srv := &Server{
- listener: l,
- url: "http://" + l.Addr().String(),
- buckets: make(map[string]*bucket),
- config: config,
- }
- go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- srv.serveHTTP(w, req)
- }))
- return srv, nil
-}
-
-// Quit closes down the server.
-func (srv *Server) Quit() {
- srv.listener.Close()
-}
-
-// URL returns a URL for the server.
-func (srv *Server) URL() string {
- return srv.url
-}
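
Wiring the fake server to a client follows the LocalServer fixture in s3t_test.go below; a condensed sketch:

package main

import (
	"fmt"
	"log"

	"github.com/goamz/goamz/aws"
	"github.com/goamz/goamz/s3"
	"github.com/goamz/goamz/s3/s3test"
)

func main() {
	srv, err := s3test.NewServer(&s3test.Config{})
	if err != nil {
		log.Fatal(err)
	}
	defer srv.Quit()

	region := aws.Region{
		Name:                 "faux-region-1",
		S3Endpoint:           srv.URL(),
		S3LocationConstraint: true, // the fake server insists on one
	}
	// The fake server currently ignores auth, so any credentials work.
	b := s3.New(aws.Auth{AccessKey: "abc", SecretKey: "123"}, region).Bucket("test-bucket")

	if err := b.PutBucket(s3.Private); err != nil {
		log.Fatal(err)
	}
	if err := b.Put("k", []byte("v"), "text/plain", s3.Private, s3.Options{}); err != nil {
		log.Fatal(err)
	}
	data, _ := b.Get("k")
	fmt.Println(string(data)) // "v"
}
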
-
-func fatalf(code int, codeStr string, errf string, a ...interface{}) {
- panic(&s3Error{
- statusCode: code,
- Code: codeStr,
- Message: fmt.Sprintf(errf, a...),
- })
-}
-
-// serveHTTP serves the S3 protocol.
-func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
- // ignore error from ParseForm as it's usually spurious.
- req.ParseForm()
-
- srv.mu.Lock()
- defer srv.mu.Unlock()
-
- if debug {
- log.Printf("s3test %q %q", req.Method, req.URL)
- }
- a := &action{
- srv: srv,
- w: w,
- req: req,
- reqId: fmt.Sprintf("%09X", srv.reqId),
- }
- srv.reqId++
-
- var r resource
- defer func() {
- switch err := recover().(type) {
- case *s3Error:
- switch r := r.(type) {
- case objectResource:
- err.BucketName = r.bucket.name
- case bucketResource:
- err.BucketName = r.name
- }
- err.RequestId = a.reqId
- // TODO HostId
- w.Header().Set("Content-Type", "application/xml")
- w.WriteHeader(err.statusCode)
- xmlMarshal(w, err)
- case nil:
- default:
- panic(err)
- }
- }()
-
- r = srv.resourceForURL(req.URL)
-
- var resp interface{}
- switch req.Method {
- case "PUT":
- resp = r.put(a)
- case "GET", "HEAD":
- resp = r.get(a)
- case "DELETE":
- resp = r.delete(a)
- case "POST":
- resp = r.post(a)
- default:
- fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
- }
- if resp != nil && req.Method != "HEAD" {
- xmlMarshal(w, resp)
- }
-}
-
-// xmlMarshal is the same as xml.Marshal except that
-// it panics on error. The marshalling should not fail,
-// but we want to know if it does.
-func xmlMarshal(w io.Writer, x interface{}) {
- if err := xml.NewEncoder(w).Encode(x); err != nil {
- panic(fmt.Errorf("error marshalling %#v: %v", x, err))
- }
-}
-
-// In a fully implemented test server, each of these would have
-// its own resource type.
-var unimplementedBucketResourceNames = map[string]bool{
- "acl": true,
- "lifecycle": true,
- "policy": true,
- "location": true,
- "logging": true,
- "notification": true,
- "versions": true,
- "requestPayment": true,
- "versioning": true,
- "website": true,
- "uploads": true,
-}
-
-var unimplementedObjectResourceNames = map[string]bool{
- "uploadId": true,
- "acl": true,
- "torrent": true,
- "uploads": true,
-}
-
-var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
-
-// resourceForURL returns a resource object for the given URL.
-func (srv *Server) resourceForURL(u *url.URL) (r resource) {
- m := pathRegexp.FindStringSubmatch(u.Path)
- if m == nil {
- fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
- }
- bucketName := m[2]
- objectName := m[4]
- if bucketName == "" {
- return nullResource{} // root
- }
- b := bucketResource{
- name: bucketName,
- bucket: srv.buckets[bucketName],
- }
- q := u.Query()
- if objectName == "" {
- for name := range q {
- if unimplementedBucketResourceNames[name] {
- return nullResource{}
- }
- }
- return b
- }
- if b.bucket == nil {
- fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
- }
- objr := objectResource{
- name: objectName,
- version: q.Get("versionId"),
- bucket: b.bucket,
- }
- for name := range q {
- if unimplementedObjectResourceNames[name] {
- return nullResource{}
- }
- }
- if obj := objr.bucket.objects[objr.name]; obj != nil {
- objr.object = obj
- }
- return objr
-}
-
-// nullResource has error stubs for all resource methods.
-type nullResource struct{}
-
-func notAllowed() interface{} {
- fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
- return nil
-}
-
-func (nullResource) put(a *action) interface{} { return notAllowed() }
-func (nullResource) get(a *action) interface{} { return notAllowed() }
-func (nullResource) post(a *action) interface{} { return notAllowed() }
-func (nullResource) delete(a *action) interface{} { return notAllowed() }
-
-const timeFormat = "2006-01-02T15:04:05.000Z07:00"
-
-type bucketResource struct {
- name string
- bucket *bucket // non-nil if the bucket already exists.
-}
-
-// GET on a bucket lists the objects in the bucket.
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
-func (r bucketResource) get(a *action) interface{} {
- if r.bucket == nil {
- fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
- }
- delimiter := a.req.Form.Get("delimiter")
- marker := a.req.Form.Get("marker")
- maxKeys := -1
- if s := a.req.Form.Get("max-keys"); s != "" {
- i, err := strconv.Atoi(s)
- if err != nil || i < 0 {
- fatalf(400, "InvalidArgument", "invalid value for max-keys: %q", s)
- }
- maxKeys = i
- }
- prefix := a.req.Form.Get("prefix")
- a.w.Header().Set("Content-Type", "application/xml")
-
- if a.req.Method == "HEAD" {
- return nil
- }
-
- var objs orderedObjects
-
- // First get all matching objects and arrange them in alphabetical order.
- for name, obj := range r.bucket.objects {
- if strings.HasPrefix(name, prefix) {
- objs = append(objs, obj)
- }
- }
- sort.Sort(objs)
-
- if maxKeys <= 0 {
- maxKeys = 1000
- }
- resp := &s3.ListResp{
- Name: r.bucket.name,
- Prefix: prefix,
- Delimiter: delimiter,
- Marker: marker,
- MaxKeys: maxKeys,
- }
-
- var prefixes []string
- for _, obj := range objs {
- if !strings.HasPrefix(obj.name, prefix) {
- continue
- }
- name := obj.name
- isPrefix := false
- if delimiter != "" {
- if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
- name = obj.name[:len(prefix)+i+len(delimiter)]
- if prefixes != nil && prefixes[len(prefixes)-1] == name {
- continue
- }
- isPrefix = true
- }
- }
- if name <= marker {
- continue
- }
- if len(resp.Contents)+len(prefixes) >= maxKeys {
- resp.IsTruncated = true
- break
- }
- if isPrefix {
- prefixes = append(prefixes, name)
- } else {
- // Contents contains only keys not found in CommonPrefixes
- resp.Contents = append(resp.Contents, obj.s3Key())
- }
- }
- resp.CommonPrefixes = prefixes
- return resp
-}
-
-// orderedObjects holds a slice of objects that can be sorted
-// by name.
-type orderedObjects []*object
-
-func (s orderedObjects) Len() int {
- return len(s)
-}
-func (s orderedObjects) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s orderedObjects) Less(i, j int) bool {
- return s[i].name < s[j].name
-}
-
-func (obj *object) s3Key() s3.Key {
- return s3.Key{
- Key: obj.name,
- LastModified: obj.mtime.Format(timeFormat),
- Size: int64(len(obj.data)),
- ETag: fmt.Sprintf(`"%x"`, obj.checksum),
- // TODO StorageClass
- // TODO Owner
- }
-}
-
-// DELETE on a bucket deletes the bucket if it's not empty.
-func (r bucketResource) delete(a *action) interface{} {
- b := r.bucket
- if b == nil {
- fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
- }
- if len(b.objects) > 0 {
- fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
- }
- delete(a.srv.buckets, b.name)
- return nil
-}
-
-// PUT on a bucket creates the bucket.
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
-func (r bucketResource) put(a *action) interface{} {
- var created bool
- if r.bucket == nil {
- if !validBucketName(r.name) {
- fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
- }
- if loc := locationConstraint(a); loc == "" {
- fatalf(400, "InvalidRequest", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
- }
- // TODO validate acl
- r.bucket = &bucket{
- name: r.name,
- // TODO default acl
- objects: make(map[string]*object),
- }
- a.srv.buckets[r.name] = r.bucket
- created = true
- }
- if !created && a.srv.config.send409Conflict() {
- fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
- }
- r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
- return nil
-}
-
-func (bucketResource) post(a *action) interface{} {
- fatalf(400, "Method", "bucket POST method not available")
- return nil
-}
-
-// validBucketName returns whether name is a valid bucket name.
-// Here are the rules, from:
-// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
-//
-// Can contain lowercase letters, numbers, periods (.), underscores (_),
-// and dashes (-). You can use uppercase letters for buckets only in the
-// US Standard region.
-//
-// Must start with a number or letter
-//
-// Must be between 3 and 255 characters long
-//
-// There's one extra rule (must not be formatted as an IP address, e.g.
-// 192.168.5.4), but the real S3 server does not seem to check that rule,
-// so we will not check it either.
-func validBucketName(name string) bool {
- if len(name) < 3 || len(name) > 255 {
- return false
- }
- r := name[0]
- if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
- return false
- }
- for _, r := range name {
- switch {
- case r >= '0' && r <= '9':
- case r >= 'a' && r <= 'z':
- case r == '_' || r == '-':
- case r == '.':
- default:
- return false
- }
- }
- return true
-}
-
-var responseParams = map[string]bool{
- "content-type": true,
- "content-language": true,
- "expires": true,
- "cache-control": true,
- "content-disposition": true,
- "content-encoding": true,
-}
-
-type objectResource struct {
- name string
- version string
- bucket *bucket // always non-nil.
- object *object // may be nil.
-}
-
-const awsTimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
-
-// GET on an object gets the contents of the object.
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
-func (objr objectResource) get(a *action) interface{} {
- obj := objr.object
- if obj == nil {
- fatalf(404, "NoSuchKey", "The specified key does not exist.")
- }
- h := a.w.Header()
- // add metadata
- for name, d := range obj.meta {
- h[name] = d
- }
- // override header values in response to request parameters.
- for name, vals := range a.req.Form {
- if strings.HasPrefix(name, "response-") {
- name = name[len("response-"):]
- if !responseParams[name] {
- continue
- }
- h.Set(name, vals[0])
- }
- }
- if r := a.req.Header.Get("Range"); r != "" {
- fatalf(400, "NotImplemented", "range unimplemented")
- }
- // TODO Last-Modified-Since
- // TODO If-Modified-Since
- // TODO If-Unmodified-Since
- // TODO If-Match
- // TODO If-None-Match
- // TODO Connection: close ??
- // TODO x-amz-request-id
- h.Set("Content-Length", fmt.Sprint(len(obj.data)))
- h.Set("ETag", hex.EncodeToString(obj.checksum))
- h.Set("Last-Modified", obj.mtime.Format(awsTimeFormat))
- if a.req.Method == "HEAD" {
- return nil
- }
- // TODO avoid holding the lock when writing data.
- _, err := a.w.Write(obj.data)
- if err != nil {
- // we can't do much except just log the fact.
- log.Printf("error writing data: %v", err)
- }
- return nil
-}
-
-var metaHeaders = map[string]bool{
- "Content-MD5": true,
- "x-amz-acl": true,
- "Content-Type": true,
- "Content-Encoding": true,
- "Content-Disposition": true,
-}
-
-// PUT on an object creates the object.
-func (objr objectResource) put(a *action) interface{} {
- // TODO Cache-Control header
- // TODO Expires header
- // TODO x-amz-server-side-encryption
- // TODO x-amz-storage-class
-
- // TODO is this correct, or should we erase all previous metadata?
- obj := objr.object
- if obj == nil {
- obj = &object{
- name: objr.name,
- meta: make(http.Header),
- }
- }
-
- var expectHash []byte
- if c := a.req.Header.Get("Content-MD5"); c != "" {
- var err error
- expectHash, err = base64.StdEncoding.DecodeString(c)
- if err != nil || len(expectHash) != md5.Size {
- fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
- }
- }
- sum := md5.New()
- // TODO avoid holding lock while reading data.
- data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
- if err != nil {
- fatalf(400, "TODO", "read error")
- }
- gotHash := sum.Sum(nil)
- if expectHash != nil && !bytes.Equal(gotHash, expectHash) {
- fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
- }
- if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
- fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
- }
-
- // PUT request has been successful - save data and metadata
- for key, values := range a.req.Header {
- key = http.CanonicalHeaderKey(key)
- if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
- obj.meta[key] = values
- }
- }
- obj.data = data
- obj.checksum = gotHash
- obj.mtime = time.Now()
- objr.bucket.objects[objr.name] = obj
-
- h := a.w.Header()
- h.Set("ETag", fmt.Sprintf(`"%s"`, hex.EncodeToString(obj.checksum)))
-
- return nil
-}
-
-func (objr objectResource) delete(a *action) interface{} {
- delete(objr.bucket.objects, objr.name)
- return nil
-}
-
-func (objr objectResource) post(a *action) interface{} {
- fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
- return nil
-}
-
-type CreateBucketConfiguration struct {
- LocationConstraint string
-}
-
-// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
-// If there is no body, an empty string will be returned.
-func locationConstraint(a *action) string {
- var body bytes.Buffer
- if _, err := io.Copy(&body, a.req.Body); err != nil {
- fatalf(400, "InvalidRequest", err.Error())
- }
- if body.Len() == 0 {
- return ""
- }
- var loc CreateBucketConfiguration
- if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
- fatalf(400, "InvalidRequest", err.Error())
- }
- return loc.LocationConstraint
-}
diff --git a/vendor/github.com/goamz/goamz/s3/sign.go b/vendor/github.com/goamz/goamz/s3/sign.go
deleted file mode 100644
index 722d97d29..000000000
--- a/vendor/github.com/goamz/goamz/s3/sign.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package s3
-
-import (
- "crypto/hmac"
- "crypto/sha1"
- "encoding/base64"
- "github.com/goamz/goamz/aws"
- "log"
- "sort"
- "strings"
-)
-
-var b64 = base64.StdEncoding
-
-// ----------------------------------------------------------------------------
-// S3 signing (http://goo.gl/G1LrK)
-
-var s3ParamsToSign = map[string]bool{
- "acl": true,
- "location": true,
- "logging": true,
- "notification": true,
- "partNumber": true,
- "policy": true,
- "requestPayment": true,
- "torrent": true,
- "uploadId": true,
- "uploads": true,
- "versionId": true,
- "versioning": true,
- "versions": true,
- "response-content-type": true,
- "response-content-language": true,
- "response-expires": true,
- "response-cache-control": true,
- "response-content-disposition": true,
- "response-content-encoding": true,
- "website": true,
- "delete": true,
-}
-
-type keySortableTupleList []keySortableTuple
-
-type keySortableTuple struct {
- Key string
- TupleString string
-}
-
-func (l keySortableTupleList) StringSlice() []string {
- slice := make([]string, len(l))
- for i, v := range l {
- slice[i] = v.TupleString
- }
- return slice
-}
-
-func (l keySortableTupleList) Len() int {
- return len(l)
-}
-
-func (l keySortableTupleList) Less(i, j int) bool {
- return l[i].Key < l[j].Key
-}
-
-func (l keySortableTupleList) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
- var md5, ctype, date, xamz string
- var xamzDate bool
- var sarray keySortableTupleList
- for k, v := range headers {
- k = strings.ToLower(k)
- switch k {
- case "content-md5":
- md5 = v[0]
- case "content-type":
- ctype = v[0]
- case "date":
- if !xamzDate {
- date = v[0]
- }
- default:
- if strings.HasPrefix(k, "x-amz-") {
- vall := strings.Join(v, ",")
- sarray = append(sarray, keySortableTuple{k, k + ":" + vall})
- if k == "x-amz-date" {
- xamzDate = true
- date = ""
- }
- }
- }
- }
- if len(sarray) > 0 {
- sort.Sort(sarray)
- xamz = strings.Join(sarray.StringSlice(), "\n") + "\n"
- }
-
- expires := false
- if v, ok := params["Expires"]; ok {
- // Query string request authentication alternative.
- expires = true
- date = v[0]
- params["AWSAccessKeyId"] = []string{auth.AccessKey}
- }
-
- sarray = sarray[0:0]
- for k, v := range params {
- if s3ParamsToSign[k] {
- for _, vi := range v {
- if vi == "" {
- sarray = append(sarray, keySortableTuple{k, k})
- } else {
- // "When signing you do not encode these values."
- sarray = append(sarray, keySortableTuple{k, k + "=" + vi})
- }
- }
- }
- }
- if len(sarray) > 0 {
- sort.Sort(sarray)
- canonicalPath = canonicalPath + "?" + strings.Join(sarray.StringSlice(), "&")
- }
-
- payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
- hash := hmac.New(sha1.New, []byte(auth.SecretKey))
- hash.Write([]byte(payload))
- signature := make([]byte, b64.EncodedLen(hash.Size()))
- b64.Encode(signature, hash.Sum(nil))
-
- if expires {
- params["Signature"] = []string{string(signature)}
- } else {
- headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
- }
- if debug {
- log.Printf("Signature payload: %q", payload)
- log.Printf("Signature: %q", signature)
- }
-}
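
Both the header and query-string variants come down to one HMAC-SHA1 over the canonical string. Reproducing by hand the object GET vector that sign_test.go below asserts, using the same payload layout as sign (empty MD5, content type, and x-amz block):

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	secret := "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"
	stringToSign := "GET\n\n\n" +
		"Tue, 27 Mar 2007 19:36:42 +0000\n" +
		"/johnsmith/photos/puppy.jpg"

	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(stringToSign))
	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))

	// Should print the Authorization value TestSignExampleObjectGet expects:
	// AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=
	fmt.Println("AWS 0PN5J17HBGZHT7JJ3X82:" + sig)
}
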
diff --git a/vendor/github.com/goamz/goamz/s3/sign_test.go b/vendor/github.com/goamz/goamz/s3/sign_test.go
deleted file mode 100644
index 112e1ca3e..000000000
--- a/vendor/github.com/goamz/goamz/s3/sign_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package s3_test
-
-import (
- "github.com/goamz/goamz/aws"
- "github.com/goamz/goamz/s3"
- . "gopkg.in/check.v1"
-)
-
-// S3 REST authentication docs: http://goo.gl/G1LrK
-
-var testAuth = aws.Auth{AccessKey: "0PN5J17HBGZHT7JJ3X82", SecretKey: "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"}
-
-func (s *S) TestSignExampleObjectGet(c *C) {
- method := "GET"
- path := "/johnsmith/photos/puppy.jpg"
- headers := map[string][]string{
- "Host": {"johnsmith.s3.amazonaws.com"},
- "Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
- }
- s3.Sign(testAuth, method, path, nil, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleObjectPut(c *C) {
- method := "PUT"
- path := "/johnsmith/photos/puppy.jpg"
- headers := map[string][]string{
- "Host": {"johnsmith.s3.amazonaws.com"},
- "Date": {"Tue, 27 Mar 2007 21:15:45 +0000"},
- "Content-Type": {"image/jpeg"},
- "Content-Length": {"94328"},
- }
- s3.Sign(testAuth, method, path, nil, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleList(c *C) {
- method := "GET"
- path := "/johnsmith/"
- params := map[string][]string{
- "prefix": {"photos"},
- "max-keys": {"50"},
- "marker": {"puppy"},
- }
- headers := map[string][]string{
- "Host": {"johnsmith.s3.amazonaws.com"},
- "Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
- "User-Agent": {"Mozilla/5.0"},
- }
- s3.Sign(testAuth, method, path, params, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleFetch(c *C) {
- method := "GET"
- path := "/johnsmith/"
- params := map[string][]string{
- "acl": {""},
- }
- headers := map[string][]string{
- "Host": {"johnsmith.s3.amazonaws.com"},
- "Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
- }
- s3.Sign(testAuth, method, path, params, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleDelete(c *C) {
- method := "DELETE"
- path := "/johnsmith/photos/puppy.jpg"
- params := map[string][]string{}
- headers := map[string][]string{
- "Host": {"s3.amazonaws.com"},
- "Date": {"Tue, 27 Mar 2007 21:20:27 +0000"},
- "User-Agent": {"dotnet"},
- "x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
- }
- s3.Sign(testAuth, method, path, params, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleUpload(c *C) {
- method := "PUT"
- path := "/static.johnsmith.net/db-backup.dat.gz"
- params := map[string][]string{}
- headers := map[string][]string{
- "Host": {"static.johnsmith.net:8080"},
- "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
- "User-Agent": {"curl/7.15.5"},
- "x-amz-acl": {"public-read"},
- "content-type": {"application/x-download"},
- "Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="},
- "X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"},
- "X-Amz-Meta-FileChecksum": {"0x02661779"},
- "X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
- "Content-Disposition": {"attachment; filename=database.dat"},
- "Content-Encoding": {"gzip"},
- "Content-Length": {"5913339"},
- }
- s3.Sign(testAuth, method, path, params, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleListAllMyBuckets(c *C) {
- method := "GET"
- path := "/"
- headers := map[string][]string{
- "Host": {"s3.amazonaws.com"},
- "Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
- }
- s3.Sign(testAuth, method, path, nil, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
-
-func (s *S) TestSignExampleUnicodeKeys(c *C) {
- method := "GET"
- path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re"
- headers := map[string][]string{
- "Host": {"s3.amazonaws.com"},
- "Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
- }
- s3.Sign(testAuth, method, path, nil, headers)
- expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="
- c.Assert(headers["Authorization"], DeepEquals, []string{expected})
-}
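
These expected values come from Amazon's signed-request examples and can be
reproduced by hand. The following standalone sketch recomputes the Object GET
case using only the standard library; the string-to-sign layout (method,
Content-MD5, Content-Type, Date, then the canonical resource, newline-separated,
with the empty MD5 and Content-Type fields left blank) mirrors what sign builds:

	package main

	import (
		"crypto/hmac"
		"crypto/sha1"
		"encoding/base64"
		"fmt"
	)

	func main() {
		secret := "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"
		stringToSign := "GET\n\n\nTue, 27 Mar 2007 19:36:42 +0000\n/johnsmith/photos/puppy.jpg"
		mac := hmac.New(sha1.New, []byte(secret))
		mac.Write([]byte(stringToSign))
		fmt.Println(base64.StdEncoding.EncodeToString(mac.Sum(nil)))
		// Prints "xXjDGYUmKxnwqr5KXNPGldn5LbA=", the value asserted in
		// TestSignExampleObjectGet above.
	}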