summaryrefslogtreecommitdiffstats
path: root/Godeps/_workspace/src/github.com/goamz
diff options
context:
space:
mode:
author=Corey Hulen <corey@hulen.com>2015-06-14 23:53:32 -0800
committer=Corey Hulen <corey@hulen.com>2015-06-14 23:53:32 -0800
commitcf7a05f80f68b5b1c8bcc0089679dd497cec2506 (patch)
tree70007378570a6962d7c175ca96af732b71aeb6da /Godeps/_workspace/src/github.com/goamz
downloadchat-cf7a05f80f68b5b1c8bcc0089679dd497cec2506.tar.gz
chat-cf7a05f80f68b5b1c8bcc0089679dd497cec2506.tar.bz2
chat-cf7a05f80f68b5b1c8bcc0089679dd497cec2506.zip
first commit
Diffstat (limited to 'Godeps/_workspace/src/github.com/goamz')
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go74
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go58
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go431
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go211
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go124
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go121
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go29
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go243
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go357
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go525
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go17
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go409
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go371
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go202
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go1151
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go427
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go590
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go79
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go629
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go114
-rw-r--r--Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go132
21 files changed, 6294 insertions, 0 deletions
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go
new file mode 100644
index 000000000..c0654f5d8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go
@@ -0,0 +1,74 @@
+package aws
+
+import (
+ "time"
+)
+
+// AttemptStrategy represents a strategy for waiting for an action
+// to complete successfully. This is an internal type used by the
+// implementation of other goamz packages.
+type AttemptStrategy struct {
+ Total time.Duration // total duration of attempt.
+ Delay time.Duration // interval between each try in the burst.
+ Min int // minimum number of retries; overrides Total
+}
+
+type Attempt struct {
+ strategy AttemptStrategy
+ last time.Time
+ end time.Time
+ force bool
+ count int
+}
+
+// Start begins a new sequence of attempts for the given strategy.
+func (s AttemptStrategy) Start() *Attempt {
+ now := time.Now()
+ return &Attempt{
+ strategy: s,
+ last: now,
+ end: now.Add(s.Total),
+ force: true,
+ }
+}
+
+// Next waits until it is time to perform the next attempt or returns
+// false if it is time to stop trying.
+func (a *Attempt) Next() bool {
+ now := time.Now()
+ sleep := a.nextSleep(now)
+ if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
+ return false
+ }
+ a.force = false
+ if sleep > 0 && a.count > 0 {
+ time.Sleep(sleep)
+ now = time.Now()
+ }
+ a.count++
+ a.last = now
+ return true
+}
+
+func (a *Attempt) nextSleep(now time.Time) time.Duration {
+ sleep := a.strategy.Delay - now.Sub(a.last)
+ if sleep < 0 {
+ return 0
+ }
+ return sleep
+}
+
+// HasNext returns whether another attempt will be made if the current
+// one fails. If it returns true, the following call to Next is
+// guaranteed to return true.
+func (a *Attempt) HasNext() bool {
+ if a.force || a.strategy.Min > a.count {
+ return true
+ }
+ now := time.Now()
+ if now.Add(a.nextSleep(now)).Before(a.end) {
+ a.force = true
+ return true
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go
new file mode 100644
index 000000000..8ba497715
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go
@@ -0,0 +1,58 @@
+package aws_test
+
+import (
+ "time"
+
+ "github.com/goamz/goamz/aws"
+ . "gopkg.in/check.v1"
+)
+
+func (S) TestAttemptTiming(c *C) {
+ testAttempt := aws.AttemptStrategy{
+ Total: 0.25e9,
+ Delay: 0.1e9,
+ }
+ want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
+ got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
+ t0 := time.Now()
+ for a := testAttempt.Start(); a.Next(); {
+ got = append(got, time.Now().Sub(t0))
+ }
+ got = append(got, time.Now().Sub(t0))
+ c.Assert(got, HasLen, len(want))
+ const margin = 0.01e9
+ for i, got := range want {
+ lo := want[i] - margin
+ hi := want[i] + margin
+ if got < lo || got > hi {
+ c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
+ }
+ }
+}
+
+func (S) TestAttemptNextHasNext(c *C) {
+ a := aws.AttemptStrategy{}.Start()
+ c.Assert(a.Next(), Equals, true)
+ c.Assert(a.Next(), Equals, false)
+
+ a = aws.AttemptStrategy{}.Start()
+ c.Assert(a.Next(), Equals, true)
+ c.Assert(a.HasNext(), Equals, false)
+ c.Assert(a.Next(), Equals, false)
+
+ a = aws.AttemptStrategy{Total: 2e8}.Start()
+ c.Assert(a.Next(), Equals, true)
+ c.Assert(a.HasNext(), Equals, true)
+ time.Sleep(2e8)
+ c.Assert(a.HasNext(), Equals, true)
+ c.Assert(a.Next(), Equals, true)
+ c.Assert(a.Next(), Equals, false)
+
+ a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
+ time.Sleep(1e8)
+ c.Assert(a.Next(), Equals, true)
+ c.Assert(a.HasNext(), Equals, true)
+ c.Assert(a.Next(), Equals, true)
+ c.Assert(a.HasNext(), Equals, false)
+ c.Assert(a.Next(), Equals, false)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go
new file mode 100644
index 000000000..cec40be7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go
@@ -0,0 +1,431 @@
+//
+// goamz - Go packages to interact with the Amazon Web Services.
+//
+// https://wiki.ubuntu.com/goamz
+//
+// Copyright (c) 2011 Canonical Ltd.
+//
+// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
+//
+package aws
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/vaughan0/go-ini"
+)
+
+// Defines the valid signers
+const (
+ V2Signature = iota
+ V4Signature = iota
+ Route53Signature = iota
+)
+
+// Defines the service endpoint and correct Signer implementation to use
+// to sign requests for this endpoint
+type ServiceInfo struct {
+ Endpoint string
+ Signer uint
+}
+
+// Region defines the URLs where AWS services may be accessed.
+//
+// See http://goo.gl/d8BP1 for more details.
+type Region struct {
+ Name string // the canonical name of this region.
+ EC2Endpoint string
+ S3Endpoint string
+ S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name.
+ S3LocationConstraint bool // true if this region requires a LocationConstraint declaration.
+ S3LowercaseBucket bool // true if the region requires bucket names to be lower case.
+ SDBEndpoint string
+ SESEndpoint string
+ SNSEndpoint string
+ SQSEndpoint string
+ IAMEndpoint string
+ ELBEndpoint string
+ DynamoDBEndpoint string
+ CloudWatchServicepoint ServiceInfo
+ AutoScalingEndpoint string
+ RDSEndpoint ServiceInfo
+ STSEndpoint string
+ CloudFormationEndpoint string
+ ECSEndpoint string
+}
+
+var Regions = map[string]Region{
+ APNortheast.Name: APNortheast,
+ APSoutheast.Name: APSoutheast,
+ APSoutheast2.Name: APSoutheast2,
+ EUCentral.Name: EUCentral,
+ EUWest.Name: EUWest,
+ USEast.Name: USEast,
+ USWest.Name: USWest,
+ USWest2.Name: USWest2,
+ USGovWest.Name: USGovWest,
+ SAEast.Name: SAEast,
+ CNNorth.Name: CNNorth,
+}
+
+// Designates a signer interface suitable for signing AWS requests, params
+// should be appropriately encoded for the request before signing.
+//
+// A signer should be initialized with Auth and the appropriate endpoint.
+type Signer interface {
+ Sign(method, path string, params map[string]string)
+}
+
+// An AWS Service interface with the API to query the AWS service
+//
+// Supplied as an easy way to mock out service calls during testing.
+type AWSService interface {
+ // Queries the AWS service at a given method/path with the params and
+ // returns an http.Response and error
+ Query(method, path string, params map[string]string) (*http.Response, error)
+ // Builds an error given an XML payload in the http.Response, can be used
+ // to process an error if the status code is not 200 for example.
+ BuildError(r *http.Response) error
+}
+
+// Implements a Server Query/Post API to easily query AWS services and build
+// errors when desired
+type Service struct {
+ service ServiceInfo
+ signer Signer
+}
+
+// Create a base set of params for an action
+func MakeParams(action string) map[string]string {
+ params := make(map[string]string)
+ params["Action"] = action
+ return params
+}
+
+// Create a new AWS server to handle making requests
+func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
+ var signer Signer
+ switch service.Signer {
+ case V2Signature:
+ signer, err = NewV2Signer(auth, service)
+ // case V4Signature:
+ // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
+ default:
+ err = fmt.Errorf("Unsupported signer for service")
+ }
+ if err != nil {
+ return
+ }
+ s = &Service{service: service, signer: signer}
+ return
+}
+
+func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
+ params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
+ u, err := url.Parse(s.service.Endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = path
+
+ s.signer.Sign(method, path, params)
+ if method == "GET" {
+ u.RawQuery = multimap(params).Encode()
+ resp, err = http.Get(u.String())
+ } else if method == "POST" {
+ resp, err = http.PostForm(u.String(), multimap(params))
+ }
+
+ return
+}
+
+func (s *Service) BuildError(r *http.Response) error {
+ errors := ErrorResponse{}
+ xml.NewDecoder(r.Body).Decode(&errors)
+ var err Error
+ err = errors.Errors
+ err.RequestId = errors.RequestId
+ err.StatusCode = r.StatusCode
+ if err.Message == "" {
+ err.Message = r.Status
+ }
+ return &err
+}
+
+type ErrorResponse struct {
+ Errors Error `xml:"Error"`
+ RequestId string // A unique ID for tracking the request
+}
+
+type Error struct {
+ StatusCode int
+ Type string
+ Code string
+ Message string
+ RequestId string
+}
+
+func (err *Error) Error() string {
+ return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
+ err.Type, err.Code, err.Message,
+ )
+}
+
+type Auth struct {
+ AccessKey, SecretKey string
+ token string
+ expiration time.Time
+}
+
+func (a *Auth) Token() string {
+ if a.token == "" {
+ return ""
+ }
+ if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock
+ *a, _ = GetAuth("", "", "", time.Time{})
+ }
+ return a.token
+}
+
+func (a *Auth) Expiration() time.Time {
+ return a.expiration
+}
+
+// To be used with other APIs that return auth credentials such as STS
+func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
+ return &Auth{
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ token: token,
+ expiration: expiration,
+ }
+}
+
+// ResponseMetadata
+type ResponseMetadata struct {
+ RequestId string // A unique ID for tracking the request
+}
+
+type BaseResponse struct {
+ ResponseMetadata ResponseMetadata
+}
+
+var unreserved = make([]bool, 128)
+var hex = "0123456789ABCDEF"
+
+func init() {
+ // RFC3986
+ u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
+ for _, c := range u {
+ unreserved[c] = true
+ }
+}
+
+func multimap(p map[string]string) url.Values {
+ q := make(url.Values, len(p))
+ for k, v := range p {
+ q[k] = []string{v}
+ }
+ return q
+}
+
+type credentials struct {
+ Code string
+ LastUpdated string
+ Type string
+ AccessKeyId string
+ SecretAccessKey string
+ Token string
+ Expiration string
+}
+
+// GetMetaData retrieves instance metadata about the current machine.
+//
+// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
+func GetMetaData(path string) (contents []byte, err error) {
+ url := "http://169.254.169.254/latest/meta-data/" + path
+
+ resp, err := RetryingClient.Get(url)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
+ return
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return
+ }
+ return []byte(body), err
+}
+
+func getInstanceCredentials() (cred credentials, err error) {
+ credentialPath := "iam/security-credentials/"
+
+ // Get the instance role
+ role, err := GetMetaData(credentialPath)
+ if err != nil {
+ return
+ }
+
+ // Get the instance role credentials
+ credentialJSON, err := GetMetaData(credentialPath + string(role))
+ if err != nil {
+ return
+ }
+
+ err = json.Unmarshal([]byte(credentialJSON), &cred)
+ return
+}
+
+// GetAuth creates an Auth based on either passed in credentials,
+// environment information or instance based role credentials.
+func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
+ // First try passed in credentials
+ if accessKey != "" && secretKey != "" {
+ return Auth{accessKey, secretKey, token, expiration}, nil
+ }
+
+ // Next try to get auth from the shared credentials file
+ auth, err = SharedAuth()
+ if err == nil {
+ // Found auth, return
+ return
+ }
+
+ // Next try to get auth from the environment
+ auth, err = EnvAuth()
+ if err == nil {
+ // Found auth, return
+ return
+ }
+
+ // Next try getting auth from the instance role
+ cred, err := getInstanceCredentials()
+ if err == nil {
+ // Found auth, return
+ auth.AccessKey = cred.AccessKeyId
+ auth.SecretKey = cred.SecretAccessKey
+ auth.token = cred.Token
+ exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
+ if err != nil {
+ err = fmt.Errorf("Error Parseing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err)
+ }
+ auth.expiration = exptdate
+ return auth, err
+ }
+ err = errors.New("No valid AWS authentication found")
+ return auth, err
+}
+
+// EnvAuth creates an Auth based on environment information.
+// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
+// variables are used.
+// AWS_SESSION_TOKEN is used if present.
+func EnvAuth() (auth Auth, err error) {
+ auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
+ if auth.AccessKey == "" {
+ auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if auth.SecretKey == "" {
+ auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
+ }
+ if auth.AccessKey == "" {
+ err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
+ }
+ if auth.SecretKey == "" {
+ err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
+ }
+
+ auth.token = os.Getenv("AWS_SESSION_TOKEN")
+ return
+}
+
+// SharedAuth creates an Auth based on shared credentials stored in
+// $HOME/.aws/credentials. The AWS_PROFILE environment variables is used to
+// select the profile.
+func SharedAuth() (auth Auth, err error) {
+ var profileName = os.Getenv("AWS_PROFILE")
+
+ if profileName == "" {
+ profileName = "default"
+ }
+
+ var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE")
+ if credentialsFile == "" {
+ var homeDir = os.Getenv("HOME")
+ if homeDir == "" {
+ err = errors.New("Could not get HOME")
+ return
+ }
+ credentialsFile = homeDir + "/.aws/credentials"
+ }
+
+ file, err := ini.LoadFile(credentialsFile)
+ if err != nil {
+ err = errors.New("Couldn't parse AWS credentials file")
+ return
+ }
+
+ var profile = file[profileName]
+ if profile == nil {
+ err = errors.New("Couldn't find profile in AWS credentials file")
+ return
+ }
+
+ auth.AccessKey = profile["aws_access_key_id"]
+ auth.SecretKey = profile["aws_secret_access_key"]
+
+ if auth.AccessKey == "" {
+ err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file")
+ }
+ if auth.SecretKey == "" {
+ err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file")
+ }
+ return
+}
+
+// Encode takes a string and URI-encodes it in a way suitable
+// to be used in AWS signatures.
+func Encode(s string) string {
+ encode := false
+ for i := 0; i != len(s); i++ {
+ c := s[i]
+ if c > 127 || !unreserved[c] {
+ encode = true
+ break
+ }
+ }
+ if !encode {
+ return s
+ }
+ e := make([]byte, len(s)*3)
+ ei := 0
+ for i := 0; i != len(s); i++ {
+ c := s[i]
+ if c > 127 || !unreserved[c] {
+ e[ei] = '%'
+ e[ei+1] = hex[c>>4]
+ e[ei+2] = hex[c&0xF]
+ ei += 3
+ } else {
+ e[ei] = c
+ ei += 1
+ }
+ }
+ return string(e[:ei])
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go
new file mode 100644
index 000000000..0c74a7905
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go
@@ -0,0 +1,211 @@
+package aws_test
+
+import (
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/goamz/goamz/aws"
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+var _ = Suite(&S{})
+
+type S struct {
+ environ []string
+}
+
+func (s *S) SetUpSuite(c *C) {
+ s.environ = os.Environ()
+}
+
+func (s *S) TearDownTest(c *C) {
+ os.Clearenv()
+ for _, kv := range s.environ {
+ l := strings.SplitN(kv, "=", 2)
+ os.Setenv(l[0], l[1])
+ }
+}
+
+func (s *S) TestSharedAuthNoHome(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "foo")
+ _, err := aws.SharedAuth()
+ c.Assert(err, ErrorMatches, "Could not get HOME")
+}
+
+func (s *S) TestSharedAuthNoCredentialsFile(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "foo")
+ os.Setenv("HOME", "/tmp")
+ _, err := aws.SharedAuth()
+ c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file")
+}
+
+func (s *S) TestSharedAuthNoProfileInFile(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "foo")
+
+ d, err := ioutil.TempDir("", "")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(d)
+
+ err = os.Mkdir(d+"/.aws", 0755)
+ if err != nil {
+ panic(err)
+ }
+
+ ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644)
+ os.Setenv("HOME", d)
+
+ _, err = aws.SharedAuth()
+ c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file")
+}
+
+func (s *S) TestSharedAuthNoKeysInProfile(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "bar")
+
+ d, err := ioutil.TempDir("", "")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(d)
+
+ err = os.Mkdir(d+"/.aws", 0755)
+ if err != nil {
+ panic(err)
+ }
+
+ ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644)
+ os.Setenv("HOME", d)
+
+ _, err = aws.SharedAuth()
+ c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file")
+}
+
+func (s *S) TestSharedAuthDefaultCredentials(c *C) {
+ os.Clearenv()
+
+ d, err := ioutil.TempDir("", "")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(d)
+
+ err = os.Mkdir(d+"/.aws", 0755)
+ if err != nil {
+ panic(err)
+ }
+
+ ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
+ os.Setenv("HOME", d)
+
+ auth, err := aws.SharedAuth()
+ c.Assert(err, IsNil)
+ c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestSharedAuth(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "bar")
+
+ d, err := ioutil.TempDir("", "")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(d)
+
+ err = os.Mkdir(d+"/.aws", 0755)
+ if err != nil {
+ panic(err)
+ }
+
+ ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
+ os.Setenv("HOME", d)
+
+ auth, err := aws.SharedAuth()
+ c.Assert(err, IsNil)
+ c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestEnvAuthNoSecret(c *C) {
+ os.Clearenv()
+ _, err := aws.EnvAuth()
+ c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
+}
+
+func (s *S) TestEnvAuthNoAccess(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
+ _, err := aws.EnvAuth()
+ c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
+}
+
+func (s *S) TestEnvAuth(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ auth, err := aws.EnvAuth()
+ c.Assert(err, IsNil)
+ c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestEnvAuthAlt(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY", "access")
+ auth, err := aws.EnvAuth()
+ c.Assert(err, IsNil)
+ c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestEnvAuthToken(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY", "access")
+ os.Setenv("AWS_SESSION_TOKEN", "token")
+ auth, err := aws.EnvAuth()
+ c.Assert(err, IsNil)
+ c.Assert(auth.SecretKey, Equals, "secret")
+ c.Assert(auth.AccessKey, Equals, "access")
+ c.Assert(auth.Token(), Equals, "token")
+}
+
+func (s *S) TestGetAuthStatic(c *C) {
+ exptdate := time.Now().Add(time.Hour)
+ auth, err := aws.GetAuth("access", "secret", "token", exptdate)
+ c.Assert(err, IsNil)
+ c.Assert(auth.AccessKey, Equals, "access")
+ c.Assert(auth.SecretKey, Equals, "secret")
+ c.Assert(auth.Token(), Equals, "token")
+ c.Assert(auth.Expiration(), Equals, exptdate)
+}
+
+func (s *S) TestGetAuthEnv(c *C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ auth, err := aws.GetAuth("", "", "", time.Time{})
+ c.Assert(err, IsNil)
+ c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestEncode(c *C) {
+ c.Assert(aws.Encode("foo"), Equals, "foo")
+ c.Assert(aws.Encode("/"), Equals, "%2F")
+}
+
+func (s *S) TestRegionsAreNamed(c *C) {
+ for n, r := range aws.Regions {
+ c.Assert(n, Equals, r.Name)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go
new file mode 100644
index 000000000..86d2ccec8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go
@@ -0,0 +1,124 @@
+package aws
+
+import (
+ "math"
+ "net"
+ "net/http"
+ "time"
+)
+
+type RetryableFunc func(*http.Request, *http.Response, error) bool
+type WaitFunc func(try int)
+type DeadlineFunc func() time.Time
+
+type ResilientTransport struct {
+ // Timeout is the maximum amount of time a dial will wait for
+ // a connect to complete.
+ //
+ // The default is no timeout.
+ //
+ // With or without a timeout, the operating system may impose
+ // its own earlier timeout. For instance, TCP timeouts are
+ // often around 3 minutes.
+ DialTimeout time.Duration
+
+ // MaxTries, if non-zero, specifies the number of times we will retry on
+ // failure. Retries are only attempted for temporary network errors or known
+ // safe failures.
+ MaxTries int
+ Deadline DeadlineFunc
+ ShouldRetry RetryableFunc
+ Wait WaitFunc
+ transport *http.Transport
+}
+
+// Convenience method for creating an http client
+func NewClient(rt *ResilientTransport) *http.Client {
+ rt.transport = &http.Transport{
+ Dial: func(netw, addr string) (net.Conn, error) {
+ c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
+ if err != nil {
+ return nil, err
+ }
+ c.SetDeadline(rt.Deadline())
+ return c, nil
+ },
+ Proxy: http.ProxyFromEnvironment,
+ }
+ // TODO: Would be nice is ResilientTransport allowed clients to initialize
+ // with http.Transport attributes.
+ return &http.Client{
+ Transport: rt,
+ }
+}
+
+var retryingTransport = &ResilientTransport{
+ Deadline: func() time.Time {
+ return time.Now().Add(5 * time.Second)
+ },
+ DialTimeout: 10 * time.Second,
+ MaxTries: 3,
+ ShouldRetry: awsRetry,
+ Wait: ExpBackoff,
+}
+
+// Exported default client
+var RetryingClient = NewClient(retryingTransport)
+
+func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.tries(req)
+}
+
+// Retry a request a maximum of t.MaxTries times.
+// We'll only retry if the proper criteria are met.
+// If a wait function is specified, wait that amount of time
+// In between requests.
+func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
+ for try := 0; try < t.MaxTries; try += 1 {
+ res, err = t.transport.RoundTrip(req)
+
+ if !t.ShouldRetry(req, res, err) {
+ break
+ }
+ if res != nil {
+ res.Body.Close()
+ }
+ if t.Wait != nil {
+ t.Wait(try)
+ }
+ }
+
+ return
+}
+
+func ExpBackoff(try int) {
+ time.Sleep(100 * time.Millisecond *
+ time.Duration(math.Exp2(float64(try))))
+}
+
+func LinearBackoff(try int) {
+ time.Sleep(time.Duration(try*100) * time.Millisecond)
+}
+
+// Decide if we should retry a request.
+// In general, the criteria for retrying a request is described here
+// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
+func awsRetry(req *http.Request, res *http.Response, err error) bool {
+ retry := false
+
+ // Retry if there's a temporary network error.
+ if neterr, ok := err.(net.Error); ok {
+ if neterr.Temporary() {
+ retry = true
+ }
+ }
+
+ // Retry if we get a 5xx series error.
+ if res != nil {
+ if res.StatusCode >= 500 && res.StatusCode < 600 {
+ retry = true
+ }
+ }
+
+ return retry
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go
new file mode 100644
index 000000000..c66a86333
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go
@@ -0,0 +1,121 @@
+package aws_test
+
+import (
+ "fmt"
+ "github.com/goamz/goamz/aws"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+)
+
+// Retrieve the response from handler using aws.RetryingClient
+func serveAndGet(handler http.HandlerFunc) (body string, err error) {
+ ts := httptest.NewServer(handler)
+ defer ts.Close()
+ resp, err := aws.RetryingClient.Get(ts.URL)
+ if err != nil {
+ return
+ }
+ if resp.StatusCode != 200 {
+ return "", fmt.Errorf("Bad status code: %d", resp.StatusCode)
+ }
+ greeting, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return
+ }
+ return strings.TrimSpace(string(greeting)), nil
+}
+
+func TestClient_expected(t *testing.T) {
+ body := "foo bar"
+
+ resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, body)
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != body {
+ t.Fatal("Body not as expected.")
+ }
+}
+
+func TestClient_delay(t *testing.T) {
+ body := "baz"
+ wait := 4
+ resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
+ if wait < 0 {
+ // If we dipped to zero delay and still failed.
+ t.Fatal("Never succeeded.")
+ }
+ wait -= 1
+ time.Sleep(time.Second * time.Duration(wait))
+ fmt.Fprintln(w, body)
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != body {
+ t.Fatal("Body not as expected.", resp)
+ }
+}
+
+func TestClient_no4xxRetry(t *testing.T) {
+ tries := 0
+
+ // Fail once before succeeding.
+ _, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
+ tries += 1
+ http.Error(w, "error", 404)
+ })
+
+ if err == nil {
+ t.Fatal("should have error")
+ }
+
+ if tries != 1 {
+ t.Fatalf("should only try once: %d", tries)
+ }
+}
+
+func TestClient_retries(t *testing.T) {
+ body := "biz"
+ failed := false
+ // Fail once before succeeding.
+ resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
+ if !failed {
+ http.Error(w, "error", 500)
+ failed = true
+ } else {
+ fmt.Fprintln(w, body)
+ }
+ })
+ if failed != true {
+ t.Error("We didn't retry!")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != body {
+ t.Fatal("Body not as expected.")
+ }
+}
+
+func TestClient_fails(t *testing.T) {
+ tries := 0
+ // Fail 3 times and return the last error.
+ _, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
+ tries += 1
+ http.Error(w, "error", 500)
+ })
+ if err == nil {
+ t.Fatal(err)
+ }
+ if tries != 3 {
+ t.Fatal("Didn't retry enough")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go
new file mode 100644
index 000000000..c4aca1d72
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go
@@ -0,0 +1,29 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+)
+
+// V4Signer:
+// Exporting methods for testing
+
+func (s *V4Signer) RequestTime(req *http.Request) time.Time {
+ return s.requestTime(req)
+}
+
+func (s *V4Signer) CanonicalRequest(req *http.Request) string {
+ return s.canonicalRequest(req)
+}
+
+func (s *V4Signer) StringToSign(t time.Time, creq string) string {
+ return s.stringToSign(t, creq)
+}
+
+func (s *V4Signer) Signature(t time.Time, sts string) string {
+ return s.signature(t, sts)
+}
+
+func (s *V4Signer) Authorization(header http.Header, t time.Time, signature string) string {
+ return s.authorization(header, t, signature)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go
new file mode 100644
index 000000000..508231e7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go
@@ -0,0 +1,243 @@
+package aws
+
// USGovWest holds the service endpoints for the us-gov-west-1 (GovCloud) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var USGovWest = Region{
	"us-gov-west-1",
	"https://ec2.us-gov-west-1.amazonaws.com",
	"https://s3-fips-us-gov-west-1.amazonaws.com",
	"",
	true,
	true,
	"",
	"",
	"https://sns.us-gov-west-1.amazonaws.com",
	"https://sqs.us-gov-west-1.amazonaws.com",
	"https://iam.us-gov.amazonaws.com",
	"https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
	"https://dynamodb.us-gov-west-1.amazonaws.com",
	ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
	"https://autoscaling.us-gov-west-1.amazonaws.com",
	ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.us-gov-west-1.amazonaws.com",
	"https://ecs.us-gov-west-1.amazonaws.com",
}
+
// USEast holds the service endpoints for the us-east-1 (N. Virginia) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var USEast = Region{
	"us-east-1",
	"https://ec2.us-east-1.amazonaws.com",
	"https://s3.amazonaws.com",
	"",
	false,
	false,
	"https://sdb.amazonaws.com",
	"https://email.us-east-1.amazonaws.com",
	"https://sns.us-east-1.amazonaws.com",
	"https://sqs.us-east-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.us-east-1.amazonaws.com",
	"https://dynamodb.us-east-1.amazonaws.com",
	ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
	"https://autoscaling.us-east-1.amazonaws.com",
	ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.us-east-1.amazonaws.com",
	"https://ecs.us-east-1.amazonaws.com",
}
+
// USWest holds the service endpoints for the us-west-1 (N. California) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var USWest = Region{
	"us-west-1",
	"https://ec2.us-west-1.amazonaws.com",
	"https://s3-us-west-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.us-west-1.amazonaws.com",
	"",
	"https://sns.us-west-1.amazonaws.com",
	"https://sqs.us-west-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.us-west-1.amazonaws.com",
	"https://dynamodb.us-west-1.amazonaws.com",
	ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
	"https://autoscaling.us-west-1.amazonaws.com",
	ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.us-west-1.amazonaws.com",
	"https://ecs.us-west-1.amazonaws.com",
}
+
// USWest2 holds the service endpoints for the us-west-2 (Oregon) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var USWest2 = Region{
	"us-west-2",
	"https://ec2.us-west-2.amazonaws.com",
	"https://s3-us-west-2.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.us-west-2.amazonaws.com",
	"https://email.us-west-2.amazonaws.com",
	"https://sns.us-west-2.amazonaws.com",
	"https://sqs.us-west-2.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.us-west-2.amazonaws.com",
	"https://dynamodb.us-west-2.amazonaws.com",
	ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
	"https://autoscaling.us-west-2.amazonaws.com",
	ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.us-west-2.amazonaws.com",
	"https://ecs.us-west-2.amazonaws.com",
}
+
// EUWest holds the service endpoints for the eu-west-1 (Ireland) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var EUWest = Region{
	"eu-west-1",
	"https://ec2.eu-west-1.amazonaws.com",
	"https://s3-eu-west-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.eu-west-1.amazonaws.com",
	"https://email.eu-west-1.amazonaws.com",
	"https://sns.eu-west-1.amazonaws.com",
	"https://sqs.eu-west-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.eu-west-1.amazonaws.com",
	"https://dynamodb.eu-west-1.amazonaws.com",
	ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
	"https://autoscaling.eu-west-1.amazonaws.com",
	ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.eu-west-1.amazonaws.com",
	"https://ecs.eu-west-1.amazonaws.com",
}
+
// EUCentral holds the service endpoints for the eu-central-1 (Frankfurt) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go. Also verify the V2Signature entries here:
// eu-central-1 launched as a Signature-V4-only region.
var EUCentral = Region{
	"eu-central-1",
	"https://ec2.eu-central-1.amazonaws.com",
	"https://s3-eu-central-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.eu-central-1.amazonaws.com",
	"https://email.eu-central-1.amazonaws.com",
	"https://sns.eu-central-1.amazonaws.com",
	"https://sqs.eu-central-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.eu-central-1.amazonaws.com",
	"https://dynamodb.eu-central-1.amazonaws.com",
	ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
	"https://autoscaling.eu-central-1.amazonaws.com",
	ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.eu-central-1.amazonaws.com",
	"https://ecs.eu-central-1.amazonaws.com",
}
+
// APSoutheast holds the service endpoints for the ap-southeast-1 (Singapore) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var APSoutheast = Region{
	"ap-southeast-1",
	"https://ec2.ap-southeast-1.amazonaws.com",
	"https://s3-ap-southeast-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.ap-southeast-1.amazonaws.com",
	"",
	"https://sns.ap-southeast-1.amazonaws.com",
	"https://sqs.ap-southeast-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
	"https://dynamodb.ap-southeast-1.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
	"https://autoscaling.ap-southeast-1.amazonaws.com",
	ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.ap-southeast-1.amazonaws.com",
	"https://ecs.ap-southeast-1.amazonaws.com",
}
+
// APSoutheast2 holds the service endpoints for the ap-southeast-2 (Sydney) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var APSoutheast2 = Region{
	"ap-southeast-2",
	"https://ec2.ap-southeast-2.amazonaws.com",
	"https://s3-ap-southeast-2.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.ap-southeast-2.amazonaws.com",
	"",
	"https://sns.ap-southeast-2.amazonaws.com",
	"https://sqs.ap-southeast-2.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
	"https://dynamodb.ap-southeast-2.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
	"https://autoscaling.ap-southeast-2.amazonaws.com",
	ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.ap-southeast-2.amazonaws.com",
	"https://ecs.ap-southeast-2.amazonaws.com",
}
+
// APNortheast holds the service endpoints for the ap-northeast-1 (Tokyo) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var APNortheast = Region{
	"ap-northeast-1",
	"https://ec2.ap-northeast-1.amazonaws.com",
	"https://s3-ap-northeast-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.ap-northeast-1.amazonaws.com",
	"",
	"https://sns.ap-northeast-1.amazonaws.com",
	"https://sqs.ap-northeast-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
	"https://dynamodb.ap-northeast-1.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
	"https://autoscaling.ap-northeast-1.amazonaws.com",
	ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.ap-northeast-1.amazonaws.com",
	"https://ecs.ap-northeast-1.amazonaws.com",
}
+
// SAEast holds the service endpoints for the sa-east-1 (São Paulo) region.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var SAEast = Region{
	"sa-east-1",
	"https://ec2.sa-east-1.amazonaws.com",
	"https://s3-sa-east-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.sa-east-1.amazonaws.com",
	"",
	"https://sns.sa-east-1.amazonaws.com",
	"https://sqs.sa-east-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.sa-east-1.amazonaws.com",
	"https://dynamodb.sa-east-1.amazonaws.com",
	ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
	"https://autoscaling.sa-east-1.amazonaws.com",
	ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature},
	"https://sts.amazonaws.com",
	"https://cloudformation.sa-east-1.amazonaws.com",
	"https://ecs.sa-east-1.amazonaws.com",
}
+
// CNNorth holds the service endpoints for the cn-north-1 (Beijing) region,
// which lives under the separate amazonaws.com.cn partition and uses
// V4Signature for monitoring and RDS.
// NOTE(review): values are positional; order must match aws.Region's field
// order — confirm against aws.go before editing.
var CNNorth = Region{
	"cn-north-1",
	"https://ec2.cn-north-1.amazonaws.com.cn",
	"https://s3.cn-north-1.amazonaws.com.cn",
	"",
	true,
	true,
	"https://sdb.cn-north-1.amazonaws.com.cn",
	"",
	"https://sns.cn-north-1.amazonaws.com.cn",
	"https://sqs.cn-north-1.amazonaws.com.cn",
	"https://iam.cn-north-1.amazonaws.com.cn",
	"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
	"https://dynamodb.cn-north-1.amazonaws.com.cn",
	ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature},
	"https://autoscaling.cn-north-1.amazonaws.com.cn",
	ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature},
	"https://sts.cn-north-1.amazonaws.com.cn",
	"https://cloudformation.cn-north-1.amazonaws.com.cn",
	"https://ecs.cn-north-1.amazonaws.com.cn",
}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go
new file mode 100644
index 000000000..5cf990525
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go
@@ -0,0 +1,357 @@
+package aws
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
// V2Signer signs requests using AWS Signature Version 2
// (HMAC-SHA256 over the canonical query string).
type V2Signer struct {
	auth    Auth        // credentials used to sign requests
	service ServiceInfo // target service endpoint description
	host    string      // host part of service.Endpoint, included in the payload to sign
}
+
// b64 is the encoding used to base64-encode V2 signatures.
var b64 = base64.StdEncoding
+
+func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) {
+ u, err := url.Parse(service.Endpoint)
+ if err != nil {
+ return nil, err
+ }
+ return &V2Signer{auth: auth, service: service, host: u.Host}, nil
+}
+
+func (s *V2Signer) Sign(method, path string, params map[string]string) {
+ params["AWSAccessKeyId"] = s.auth.AccessKey
+ params["SignatureVersion"] = "2"
+ params["SignatureMethod"] = "HmacSHA256"
+ if s.auth.Token() != "" {
+ params["SecurityToken"] = s.auth.Token()
+ }
+
+ // AWS specifies that the parameters in a signed request must
+ // be provided in the natural order of the keys. This is distinct
+ // from the natural order of the encoded value of key=value.
+ // Percent and Equals affect the sorting order.
+ var keys, sarray []string
+ for k, _ := range params {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
+ }
+ joined := strings.Join(sarray, "&")
+ payload := method + "\n" + s.host + "\n" + path + "\n" + joined
+ hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
+ hash.Write([]byte(payload))
+ signature := make([]byte, b64.EncodedLen(hash.Size()))
+ b64.Encode(signature, hash.Sum(nil))
+
+ params["Signature"] = string(signature)
+}
+
// Common date formats for signing requests
const (
	ISO8601BasicFormat = "20060102T150405Z" // full UTC timestamp (e.g. x-amz-date)
	ISO8601BasicFormatShort = "20060102" // date portion only, used in credential scopes
)
+
// Route53Signer signs Route53 API requests using the AWS3-HTTPS scheme.
type Route53Signer struct {
	auth Auth // credentials used to sign requests
}
+
+func NewRoute53Signer(auth Auth) *Route53Signer {
+ return &Route53Signer{auth: auth}
+}
+
+// getCurrentDate fetches the date stamp from the aws servers to
+// ensure the auth headers are within 5 minutes of the server time
+func (s *Route53Signer) getCurrentDate() string {
+ response, err := http.Get("https://route53.amazonaws.com/date")
+ if err != nil {
+ fmt.Print("Unable to get date from amazon: ", err)
+ return ""
+ }
+
+ response.Body.Close()
+ return response.Header.Get("Date")
+}
+
+// Creates the authorize signature based on the date stamp and secret key
+func (s *Route53Signer) getHeaderAuthorize(message string) string {
+ hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
+ hmacSha256.Write([]byte(message))
+ cryptedString := hmacSha256.Sum(nil)
+
+ return base64.StdEncoding.EncodeToString(cryptedString)
+}
+
+// Adds all the required headers for AWS Route53 API to the request
+// including the authorization
+func (s *Route53Signer) Sign(req *http.Request) {
+ date := s.getCurrentDate()
+ authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
+ s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))
+
+ req.Header.Set("Host", req.Host)
+ req.Header.Set("X-Amzn-Authorization", authHeader)
+ req.Header.Set("X-Amz-Date", date)
+ req.Header.Set("Content-Type", "application/xml")
+}
+
/*
The V4Signer encapsulates all of the functionality to sign a request with the AWS
Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
*/
type V4Signer struct {
	auth Auth // credentials used to sign requests
	serviceName string // AWS service name, used in the credential scope
	region Region // region whose Name is used in the credential scope
}
+
+/*
+Return a new instance of a V4Signer capable of signing AWS requests.
+*/
+func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
+ return &V4Signer{auth: auth, serviceName: serviceName, region: region}
+}
+
+/*
+Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
+
+The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date"
+or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires
+the "host" header to be a signed header, therefor the Sign method will manually set a "host" header from
+the request.Host.
+
+The signed request will include a new "Authorization" header indicating that the request has been signed.
+
+Any changes to the request after signing the request will invalidate the signature.
+*/
+func (s *V4Signer) Sign(req *http.Request) {
+ req.Header.Set("host", req.Host) // host header must be included as a signed header
+ t := s.requestTime(req) // Get requst time
+ creq := s.canonicalRequest(req) // Build canonical request
+ sts := s.stringToSign(t, creq) // Build string to sign
+ signature := s.signature(t, sts) // Calculate the AWS Signature Version 4
+ auth := s.authorization(req.Header, t, signature) // Create Authorization header value
+ req.Header.Set("Authorization", auth) // Add Authorization header to request
+ return
+}
+
+/*
+requestTime method will parse the time from the request "x-amz-date" or "date" headers.
+If the "x-amz-date" header is present, that will take priority over the "date" header.
+If neither header is defined or we are unable to parse either header as a valid date
+then we will create a new "x-amz-date" header with the current time.
+*/
+func (s *V4Signer) requestTime(req *http.Request) time.Time {
+
+ // Get "x-amz-date" header
+ date := req.Header.Get("x-amz-date")
+
+ // Attempt to parse as ISO8601BasicFormat
+ t, err := time.Parse(ISO8601BasicFormat, date)
+ if err == nil {
+ return t
+ }
+
+ // Attempt to parse as http.TimeFormat
+ t, err = time.Parse(http.TimeFormat, date)
+ if err == nil {
+ req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
+ return t
+ }
+
+ // Get "date" header
+ date = req.Header.Get("date")
+
+ // Attempt to parse as http.TimeFormat
+ t, err = time.Parse(http.TimeFormat, date)
+ if err == nil {
+ return t
+ }
+
+ // Create a current time header to be used
+ t = time.Now().UTC()
+ req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
+ return t
+}
+
+/*
+canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. (http://goo.gl/eUUZ3S)
+
+ CanonicalRequest =
+ HTTPRequestMethod + '\n' +
+ CanonicalURI + '\n' +
+ CanonicalQueryString + '\n' +
+ CanonicalHeaders + '\n' +
+ SignedHeaders + '\n' +
+ HexEncode(Hash(Payload))
+*/
+func (s *V4Signer) canonicalRequest(req *http.Request) string {
+ c := new(bytes.Buffer)
+ fmt.Fprintf(c, "%s\n", req.Method)
+ fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL))
+ fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL))
+ fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header))
+ fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header))
+ fmt.Fprintf(c, "%s", s.payloadHash(req))
+ return c.String()
+}
+
+func (s *V4Signer) canonicalURI(u *url.URL) string {
+ canonicalPath := u.RequestURI()
+ if u.RawQuery != "" {
+ canonicalPath = canonicalPath[:len(canonicalPath)-len(u.RawQuery)-1]
+ }
+ slash := strings.HasSuffix(canonicalPath, "/")
+ canonicalPath = path.Clean(canonicalPath)
+ if canonicalPath != "/" && slash {
+ canonicalPath += "/"
+ }
+ return canonicalPath
+}
+
+func (s *V4Signer) canonicalQueryString(u *url.URL) string {
+ var a []string
+ for k, vs := range u.Query() {
+ k = url.QueryEscape(k)
+ for _, v := range vs {
+ if v == "" {
+ a = append(a, k+"=")
+ } else {
+ v = url.QueryEscape(v)
+ a = append(a, k+"="+v)
+ }
+ }
+ }
+ sort.Strings(a)
+ return strings.Join(a, "&")
+}
+
+func (s *V4Signer) canonicalHeaders(h http.Header) string {
+ i, a := 0, make([]string, len(h))
+ for k, v := range h {
+ for j, w := range v {
+ v[j] = strings.Trim(w, " ")
+ }
+ sort.Strings(v)
+ a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",")
+ i++
+ }
+ sort.Strings(a)
+ return strings.Join(a, "\n")
+}
+
+func (s *V4Signer) signedHeaders(h http.Header) string {
+ i, a := 0, make([]string, len(h))
+ for k, _ := range h {
+ a[i] = strings.ToLower(k)
+ i++
+ }
+ sort.Strings(a)
+ return strings.Join(a, ";")
+}
+
// payloadHash returns the hex-encoded SHA-256 digest of the request body,
// restoring req.Body afterwards so the body can still be transmitted.
// A nil body is hashed as the empty string, as Signature V4 requires.
func (s *V4Signer) payloadHash(req *http.Request) string {
	var b []byte
	if req.Body == nil {
		b = []byte("")
	} else {
		var err error
		b, err = ioutil.ReadAll(req.Body)
		if err != nil {
			// TODO: I REALLY DON'T LIKE THIS PANIC!!!!
			// Sign has no error return, so an unreadable body cannot be
			// reported to the caller any other way.
			panic(err)
		}
	}
	// Replace the consumed body so the request remains usable.
	req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
	return s.hash(string(b))
}
+
+/*
+stringToSign method creates the string to sign accorting to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu)
+
+ StringToSign =
+ Algorithm + '\n' +
+ RequestDate + '\n' +
+ CredentialScope + '\n' +
+ HexEncode(Hash(CanonicalRequest))
+*/
+func (s *V4Signer) stringToSign(t time.Time, creq string) string {
+ w := new(bytes.Buffer)
+ fmt.Fprint(w, "AWS4-HMAC-SHA256\n")
+ fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat))
+ fmt.Fprintf(w, "%s\n", s.credentialScope(t))
+ fmt.Fprintf(w, "%s", s.hash(creq))
+ return w.String()
+}
+
+func (s *V4Signer) credentialScope(t time.Time) string {
+ return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName)
+}
+
+/*
+signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1)
+
+ signature = HexEncode(HMAC(derived-signing-key, string-to-sign))
+*/
+func (s *V4Signer) signature(t time.Time, sts string) string {
+ h := s.hmac(s.derivedKey(t), []byte(sts))
+ return fmt.Sprintf("%x", h)
+}
+
+/*
+derivedKey method derives a signing key to be used for signing a request.
+
+ kSecret = Your AWS Secret Access Key
+ kDate = HMAC("AWS4" + kSecret, Date)
+ kRegion = HMAC(kDate, Region)
+ kService = HMAC(kRegion, Service)
+ kSigning = HMAC(kService, "aws4_request")
+*/
+func (s *V4Signer) derivedKey(t time.Time) []byte {
+ h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort)))
+ h = s.hmac(h, []byte(s.region.Name))
+ h = s.hmac(h, []byte(s.serviceName))
+ h = s.hmac(h, []byte("aws4_request"))
+ return h
+}
+
+/*
+authorization method generates the authorization header value.
+*/
+func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string {
+ w := new(bytes.Buffer)
+ fmt.Fprint(w, "AWS4-HMAC-SHA256 ")
+ fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t))
+ fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header))
+ fmt.Fprintf(w, "Signature=%s", signature)
+ return w.String()
+}
+
+// hash method calculates the sha256 hash for a given string
+func (s *V4Signer) hash(in string) string {
+ h := sha256.New()
+ fmt.Fprintf(h, "%s", in)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+// hmac method calculates the sha256 hmac for a given slice of bytes
+func (s *V4Signer) hmac(key, data []byte) []byte {
+ h := hmac.New(sha256.New, key)
+ h.Write(data)
+ return h.Sum(nil)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go
new file mode 100644
index 000000000..c6b685e20
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go
@@ -0,0 +1,525 @@
+package aws_test
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/goamz/goamz/aws"
+ . "gopkg.in/check.v1"
+)
+
// Register the V4 signer suite with gocheck.
var _ = Suite(&V4SignerSuite{})
+
// V4SignerSuite exercises aws.V4Signer against the published AWS
// Signature Version 4 test-suite cases.
type V4SignerSuite struct {
	auth aws.Auth // fixed example credentials used by every case
	region aws.Region // region whose credential scope the cases expect
	cases []V4SignerSuiteCase // populated in SetUpSuite
}
+
// V4SignerSuiteCase describes one signing test case: the request material
// plus the expected intermediate and final signing artifacts.
type V4SignerSuiteCase struct {
	label string // test-suite case name
	request V4SignerSuiteCaseRequest
	canonicalRequest string // expected Task 1 output
	stringToSign string // expected Task 2 output
	signature string // expected Task 3 output
	authorization string // expected Authorization header value
}
+
// V4SignerSuiteCaseRequest is the raw HTTP request material for a case.
type V4SignerSuiteCaseRequest struct {
	method string
	host string
	url string
	headers []string // "Name:value" pairs, e.g. "DATE:Mon, 09 Sep 2011 23:36:00 GMT"
	body string
}
+
+func (s *V4SignerSuite) SetUpSuite(c *C) {
+ s.auth = aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"}
+ s.region = aws.USEast
+
+ // Test cases from the Signature Version 4 Test Suite (http://goo.gl/nguvs0)
+ s.cases = append(s.cases,
+
+ // get-header-key-duplicate
+ V4SignerSuiteCase{
+ label: "get-header-key-duplicate",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar", "zoo:foobar", "zoo:zoobar"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:foobar,zoobar,zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3c52f0eaae2b61329c0a332e3fa15842a37bc5812cf4d80eb64784308850e313",
+ signature: "54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed",
+ },
+
+ // get-header-value-order
+ V4SignerSuiteCase{
+ label: "get-header-value-order",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p:z", "p:a", "p:p", "p:a"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:a,a,p,z\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n94c0389fefe0988cbbedc8606f0ca0b485b48da010d09fc844b45b697c8924fe",
+ signature: "d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d",
+ },
+
+ // get-header-value-trim
+ V4SignerSuiteCase{
+ label: "get-header-value-trim",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p: phfft "},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:phfft\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndddd1902add08da1ac94782b05f9278c08dc7468db178a84f8950d93b30b1f35",
+ signature: "debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
+ },
+
+ // get-relative-relative
+ V4SignerSuiteCase{
+ label: "get-relative-relative",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/foo/bar/../..",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-relative
+ V4SignerSuiteCase{
+ label: "get-relative",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/foo/..",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-slash-dot-slash
+ V4SignerSuiteCase{
+ label: "get-slash-dot-slash",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/./",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-slash-pointless-dot
+ V4SignerSuiteCase{
+ label: "get-slash-pointless-dot",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/./foo",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n8021a97572ee460f87ca67f4e8c0db763216d84715f5424a843a5312a3321e2d",
+ signature: "910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
+ },
+
+ // get-slash
+ V4SignerSuiteCase{
+ label: "get-slash",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "//",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-slashes
+ V4SignerSuiteCase{
+ label: "get-slashes",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "//foo//",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/foo/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n6bb4476ee8745730c9cb79f33a0c70baa6d8af29c0077fa12e4e8f1dd17e7098",
+ signature: "b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19",
+ },
+
+ // get-space
+ V4SignerSuiteCase{
+ label: "get-space",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/%20/foo",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/%20/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n69c45fb9fe3fd76442b5086e50b2e9fec8298358da957b293ef26e506fdfb54b",
+ signature: "f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374",
+ },
+
+ // get-unreserved
+ V4SignerSuiteCase{
+ label: "get-unreserved",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndf63ee3247c0356c696a3b21f8d8490b01fa9cd5bc6550ef5ef5f4636b7b8901",
+ signature: "830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e",
+ },
+
+ // get-utf8
+ V4SignerSuiteCase{
+ label: "get-utf8",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/%E1%88%B4",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/%E1%88%B4\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n27ba31df5dbc6e063d8f87d62eb07143f7f271c5330a917840586ac1c85b6f6b",
+ signature: "8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
+ },
+
+ // get-vanilla-empty-query-key
+ V4SignerSuiteCase{
+ label: "get-vanilla-empty-query-key",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?foo=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n0846c2945b0832deb7a463c66af5c4f8bd54ec28c438e67a214445b157c9ddf8",
+ signature: "56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f",
+ },
+
+ // get-vanilla-query-order-key-case
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-order-key-case",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?foo=Zoo&foo=aha",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\nfoo=Zoo&foo=aha\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ne25f777ba161a0f1baf778a87faf057187cf5987f17953320e3ca399feb5f00d",
+ signature: "be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
+ },
+
+ // get-vanilla-query-order-key
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-order-key",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?a=foo&b=foo",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\na=foo&b=foo\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n2f23d14fe13caebf6dfda346285c6d9c14f49eaca8f5ec55c627dd7404f7a727",
+ signature: "0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b",
+ },
+
+ // get-vanilla-query-order-value
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-order-value",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?foo=b&foo=a",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\nfoo=a&foo=b\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n33dffc220e89131f8f6157a35c40903daa658608d9129ff9489e5cf5bbd9b11b",
+ signature: "feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
+ },
+
+ // get-vanilla-query-unreserved
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-unreserved",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nd2578f3156d4c9d180713d1ff20601d8a3eed0dd35447d24603d7d67414bd6b5",
+ signature: "f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb",
+ },
+
+ // get-vanilla-query
+ V4SignerSuiteCase{
+ label: "get-vanilla-query",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-vanilla-ut8-query
+ V4SignerSuiteCase{
+ label: "get-vanilla-ut8-query",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?ሴ=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n%E1%88%B4=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nde5065ff39c131e6c2e2bd19cd9345a794bf3b561eab20b8d97b2093fc2a979e",
+ signature: "6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
+ },
+
+ // get-vanilla
+ V4SignerSuiteCase{
+ label: "get-vanilla",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // post-header-key-case
+ V4SignerSuiteCase{
+ label: "post-header-key-case",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4",
+ signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ },
+
+ // post-header-key-sort
+ V4SignerSuiteCase{
+ label: "post-header-key-sort",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n34e1bddeb99e76ee01d63b5e28656111e210529efeec6cdfd46a48e4c734545d",
+ signature: "b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
+ },
+
+ // post-header-value-case
+ V4SignerSuiteCase{
+ label: "post-header-value-case",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "zoo:ZOOBAR"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:ZOOBAR\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3aae6d8274b8c03e2cc96fc7d6bda4b9bd7a0a184309344470b2c96953e124aa",
+ signature: "273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
+ },
+
+ // post-vanilla-empty-query-value
+ V4SignerSuiteCase{
+ label: "post-vanilla-empty-query-value",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/?foo=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e",
+ signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ },
+
+ // post-vanilla-query
+ V4SignerSuiteCase{
+ label: "post-vanilla-query",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/?foo=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e",
+ signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ },
+
+ // post-vanilla
+ V4SignerSuiteCase{
+ label: "post-vanilla",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4",
+ signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ },
+
+ // post-x-www-form-urlencoded-parameters
+ V4SignerSuiteCase{
+ label: "post-x-www-form-urlencoded-parameters",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Content-Type:application/x-www-form-urlencoded; charset=utf8", "Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ body: "foo=bar",
+ },
+ canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded; charset=utf8\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nc4115f9e54b5cecf192b1eaa23b8e88ed8dc5391bd4fde7b3fff3d9c9fe0af1f",
+ signature: "b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71",
+ },
+
+ // post-x-www-form-urlencoded
+ V4SignerSuiteCase{
+ label: "post-x-www-form-urlencoded",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Content-Type:application/x-www-form-urlencoded", "Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ body: "foo=bar",
+ },
+ canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n4c5c6e4b52fb5fb947a8733982a8a5a61b14f04345cbfe6e739236c76dd48f74",
+ signature: "5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
+ },
+ )
+}
+
+// TestCases drives every case in s.cases through each intermediate stage of
+// the V4 signer (canonical request, string to sign, signature, authorization
+// header) and finally through Sign itself, checking each stage against the
+// expected value recorded in the case.
+func (s *V4SignerSuite) TestCases(c *C) {
+	signer := aws.NewV4Signer(s.auth, "host", s.region)
+
+	for _, testCase := range s.cases {
+
+		req, err := http.NewRequest(testCase.request.method, "http://"+testCase.request.host+testCase.request.url, strings.NewReader(testCase.request.body))
+		c.Assert(err, IsNil, Commentf("Testcase: %s", testCase.label))
+		// Headers are stored as "Name:value" strings; split on the first
+		// colon only so header values may themselves contain colons.
+		for _, v := range testCase.request.headers {
+			h := strings.SplitN(v, ":", 2)
+			req.Header.Add(h[0], h[1])
+		}
+		// Copy req.Host into the header set so the host participates in
+		// canonicalization like any other signed header.
+		req.Header.Set("host", req.Host)
+
+		t := signer.RequestTime(req)
+
+		canonicalRequest := signer.CanonicalRequest(req)
+		c.Check(canonicalRequest, Equals, testCase.canonicalRequest, Commentf("Testcase: %s", testCase.label))
+
+		stringToSign := signer.StringToSign(t, canonicalRequest)
+		c.Check(stringToSign, Equals, testCase.stringToSign, Commentf("Testcase: %s", testCase.label))
+
+		signature := signer.Signature(t, stringToSign)
+		c.Check(signature, Equals, testCase.signature, Commentf("Testcase: %s", testCase.label))
+
+		authorization := signer.Authorization(req.Header, t, signature)
+		c.Check(authorization, Equals, testCase.authorization, Commentf("Testcase: %s", testCase.label))
+
+		// End-to-end: Sign must reproduce the same Authorization header.
+		signer.Sign(req)
+		c.Check(req.Header.Get("Authorization"), Equals, testCase.authorization, Commentf("Testcase: %s", testCase.label))
+	}
+}
+
+// ExampleV4Signer demonstrates signing and issuing a request with a V4Signer.
+// NOTE(review): as example code, errors from EnvAuth/NewRequest are only
+// printed and the error/response from Do are discarded; real callers should
+// handle them.
+func ExampleV4Signer() {
+	// Get auth from env vars
+	auth, err := aws.EnvAuth()
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	// Create a signer with the auth, name of the service, and aws region
+	signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast)
+
+	// Create a request
+	req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request"))
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	// Date or x-amz-date header is required to sign a request
+	req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat))
+
+	// Sign the request
+	signer.Sign(req)
+
+	// Issue signed request
+	http.DefaultClient.Do(req)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go
new file mode 100644
index 000000000..4ff913cde
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go
@@ -0,0 +1,17 @@
+package s3
+
+import (
+ "github.com/goamz/goamz/aws"
+)
+
+// Sign exposes the package-private sign function to external tests
+// (package s3_test).
+func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
+	sign(auth, method, path, params, headers)
+}
+
+// SetListPartsMax lets tests shrink the ListParts page size so pagination
+// paths can be exercised with small fixtures.
+func SetListPartsMax(n int) {
+	listPartsMax = n
+}
+
+// SetListMultiMax lets tests shrink the ListMulti page size so pagination
+// paths can be exercised with small fixtures.
+func SetListMultiMax(n int) {
+	listMultiMax = n
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go
new file mode 100644
index 000000000..1533bda9d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go
@@ -0,0 +1,409 @@
+package s3
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "io"
+ "sort"
+ "strconv"
+)
+
+// Multi represents an unfinished multipart upload.
+//
+// Multipart uploads allow sending big objects in smaller chunks.
+// After all parts have been sent, the upload must be explicitly
+// completed by calling Complete with the list of parts.
+//
+// See http://goo.gl/vJfTG for an overview of multipart uploads.
+type Multi struct {
+	Bucket   *Bucket // bucket the upload targets
+	Key      string  // object key being uploaded
+	UploadId string  // upload identifier assigned by S3
+}
+
+// listMultiMax is the page size requested from ListMultipartUploads.
+// That's the default. Here just for testing (see SetListMultiMax).
+var listMultiMax = 1000
+
+// listMultiResp mirrors the XML body of a ListMultipartUploads response.
+type listMultiResp struct {
+	NextKeyMarker      string
+	NextUploadIdMarker string
+	IsTruncated        bool
+	Upload             []Multi
+	CommonPrefixes     []string `xml:"CommonPrefixes>Prefix"`
+}
+
+// ListMulti returns the list of unfinished multipart uploads in b.
+//
+// The prefix parameter limits the response to keys that begin with the
+// specified prefix. You can use prefixes to separate a bucket into different
+// groupings of keys (to get the feeling of folders, for example).
+//
+// The delim parameter causes the response to group all of the keys that
+// share a common prefix up to the next delimiter in a single entry within
+// the CommonPrefixes field. You can use delimiters to separate a bucket
+// into different groupings of keys, similar to how folders would work.
+//
+// See http://goo.gl/ePioY for details.
+func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
+	params := map[string][]string{
+		"uploads":     {""},
+		"max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
+		"prefix":      {prefix},
+		"delimiter":   {delim},
+	}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		req := &request{
+			method: "GET",
+			bucket: b.Name,
+			params: params,
+		}
+		var resp listMultiResp
+		err := b.S3.query(req, &resp)
+		// Retryable failures loop back while attempts remain.
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		// Take the address of the slice element (not the loop variable)
+		// so each *Multi points at distinct storage.
+		for i := range resp.Upload {
+			multi := &resp.Upload[i]
+			multi.Bucket = b
+			multis = append(multis, multi)
+		}
+		prefixes = append(prefixes, resp.CommonPrefixes...)
+		if !resp.IsTruncated {
+			return multis, prefixes, nil
+		}
+		// Page forward from the markers returned by this response.
+		params["key-marker"] = []string{resp.NextKeyMarker}
+		params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
+		attempt = b.S3.AttemptStrategy.Start() // Last request worked.
+	}
+	// Loop exits only via return above; Next() failing after a retryable
+	// error is handled by the retry branch, so this is unreachable.
+	panic("unreachable")
+}
+
+// Multi returns a multipart upload handler for the provided key
+// inside b. If a multipart upload exists for key, it is returned,
+// otherwise a new multipart upload is initiated with contType and perm.
+func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
+	// ListMulti treats key as a prefix, so it may return uploads for other
+	// keys as well; a "NoSuchUpload" error simply means none exist yet.
+	multis, _, err := b.ListMulti(key, "")
+	if err != nil && !hasCode(err, "NoSuchUpload") {
+		return nil, err
+	}
+	// Scan for an exact key match among the prefix matches.
+	for _, m := range multis {
+		if m.Key == key {
+			return m, nil
+		}
+	}
+	return b.InitMulti(key, contType, perm)
+}
+
+// InitMulti initializes a new multipart upload at the provided
+// key inside b and returns a value for manipulating it.
+//
+// See http://goo.gl/XP8kL for details.
+func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
+	headers := map[string][]string{
+		"Content-Type":   {contType},
+		"Content-Length": {"0"},
+		"x-amz-acl":      {string(perm)},
+	}
+	params := map[string][]string{
+		"uploads": {""},
+	}
+	req := &request{
+		method:  "POST",
+		bucket:  b.Name,
+		path:    key,
+		headers: headers,
+		params:  params,
+	}
+	var err error
+	var resp struct {
+		UploadId string `xml:"UploadId"`
+	}
+	// Retry the whole request until it succeeds, fails with a
+	// non-retryable error, or the attempt strategy is exhausted.
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		err = b.S3.query(req, &resp)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
+}
+
+// PutPart sends part n of the multipart upload, reading all the content from r.
+// Each part, except for the last one, must be at least 5MB in size.
+//
+// See http://goo.gl/pqZer for details.
+func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
+	// seekerInfo consumes r to compute its size and MD5; putPart rewinds
+	// it again before each upload attempt.
+	partSize, _, md5b64, err := seekerInfo(r)
+	if err != nil {
+		return Part{}, err
+	}
+	return m.putPart(n, r, partSize, md5b64)
+}
+
+// putPart uploads part n from r, whose total size and base64 MD5 have
+// already been computed by the caller, retrying per the bucket's
+// AttemptStrategy. It returns the Part with the ETag reported by S3.
+func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
+	headers := map[string][]string{
+		"Content-Length": {strconv.FormatInt(partSize, 10)},
+		"Content-MD5":    {md5b64},
+	}
+	params := map[string][]string{
+		"uploadId":   {m.UploadId},
+		"partNumber": {strconv.FormatInt(int64(n), 10)},
+	}
+	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
+		// Rewind before every attempt so a retry resends the full payload.
+		_, err := r.Seek(0, 0)
+		if err != nil {
+			return Part{}, err
+		}
+		req := &request{
+			method:  "PUT",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			headers: headers,
+			params:  params,
+			payload: r,
+		}
+		err = m.Bucket.S3.prepare(req)
+		if err != nil {
+			return Part{}, err
+		}
+		resp, err := m.Bucket.S3.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return Part{}, err
+		}
+		// The ETag is required later to reuse/complete the part.
+		etag := resp.Header.Get("ETag")
+		if etag == "" {
+			return Part{}, errors.New("part upload succeeded with no ETag")
+		}
+		return Part{n, etag, partSize}, nil
+	}
+	panic("unreachable")
+}
+
+// seekerInfo rewinds r, reads it to EOF, and returns its total size along
+// with the hex- and base64-encoded MD5 of the full content. The reader is
+// left positioned at EOF; callers must seek back before reusing it.
+func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
+	_, err = r.Seek(0, 0)
+	if err != nil {
+		return 0, "", "", err
+	}
+	digest := md5.New()
+	size, err = io.Copy(digest, r)
+	if err != nil {
+		return 0, "", "", err
+	}
+	sum := digest.Sum(nil)
+	md5hex = hex.EncodeToString(sum)
+	md5b64 = base64.StdEncoding.EncodeToString(sum)
+	return size, md5hex, md5b64, nil
+}
+
+// Part describes one uploaded part of a multipart upload.
+type Part struct {
+	N    int `xml:"PartNumber"` // 1-based part number
+	ETag string                 // ETag reported by S3 for the part
+	Size int64                  // part size in bytes
+}
+
+// partSlice implements sort.Interface, ordering parts by part number.
+type partSlice []Part
+
+func (s partSlice) Len() int           { return len(s) }
+func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
+func (s partSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// listPartsResp mirrors the XML body of a ListParts response.
+type listPartsResp struct {
+	NextPartNumberMarker string
+	IsTruncated          bool
+	Part                 []Part
+}
+
+// listPartsMax is the page size requested from ListParts.
+// That's the default. Here just for testing (see SetListPartsMax).
+var listPartsMax = 1000
+
+// ListParts returns the list of previously uploaded parts in m,
+// ordered by part number.
+//
+// See http://goo.gl/ePioY for details.
+func (m *Multi) ListParts() ([]Part, error) {
+	params := map[string][]string{
+		"uploadId":  {m.UploadId},
+		"max-parts": {strconv.FormatInt(int64(listPartsMax), 10)},
+	}
+	var parts partSlice
+	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
+		req := &request{
+			method: "GET",
+			bucket: m.Bucket.Name,
+			path:   m.Key,
+			params: params,
+		}
+		var resp listPartsResp
+		err := m.Bucket.S3.query(req, &resp)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		parts = append(parts, resp.Part...)
+		// Sort only once the final page has been accumulated.
+		if !resp.IsTruncated {
+			sort.Sort(parts)
+			return parts, nil
+		}
+		// Page forward from the marker returned by this response.
+		params["part-number-marker"] = []string{resp.NextPartNumberMarker}
+		attempt = m.Bucket.S3.AttemptStrategy.Start() // Last request worked.
+	}
+	panic("unreachable")
+}
+
+// ReaderAtSeeker combines io.ReaderAt and io.ReadSeeker, the capabilities
+// PutAll needs to measure, checksum, and re-read sections of its input.
+type ReaderAtSeeker interface {
+	io.ReaderAt
+	io.ReadSeeker
+}
+
+// PutAll sends all of r via a multipart upload with parts no larger
+// than partSize bytes, which must be set to at least 5MB.
+// Parts previously uploaded are either reused if their checksum
+// and size match the new part, or otherwise overwritten with the
+// new content.
+// PutAll returns all the parts of m (reused or not).
+func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
+	old, err := m.ListParts()
+	if err != nil && !hasCode(err, "NoSuchUpload") {
+		return nil, err
+	}
+	reuse := 0   // Index of next old part to consider reusing.
+	current := 1 // Part number of latest good part handled.
+	// Seek to the end (whence 2) to learn the total input size.
+	totalSize, err := r.Seek(0, 2)
+	if err != nil {
+		return nil, err
+	}
+	first := true // Must send at least one empty part if the file is empty.
+	var result []Part
+NextSection:
+	for offset := int64(0); offset < totalSize || first; offset += partSize {
+		first = false
+		// The final section may be shorter than partSize.
+		if offset+partSize > totalSize {
+			partSize = totalSize - offset
+		}
+		section := io.NewSectionReader(r, offset, partSize)
+		_, md5hex, md5b64, err := seekerInfo(section)
+		if err != nil {
+			return nil, err
+		}
+		// Old parts are sorted by part number (ListParts); skip past any
+		// with numbers at or below current, reusing an exact match.
+		for reuse < len(old) && old[reuse].N <= current {
+			// Looks like this part was already sent.
+			part := &old[reuse]
+			// S3 reports the part ETag as the quoted MD5 hex digest;
+			// TODO confirm this holds for all part uploads.
+			etag := `"` + md5hex + `"`
+			if part.N == current && part.Size == partSize && part.ETag == etag {
+				// Checksum matches. Reuse the old part.
+				result = append(result, *part)
+				current++
+				continue NextSection
+			}
+			reuse++
+		}
+
+		// Part wasn't found or doesn't match. Send it.
+		part, err := m.putPart(current, section, partSize, md5b64)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, part)
+		current++
+	}
+	return result, nil
+}
+
+// completeUpload is the XML request body for CompleteMultipartUpload.
+type completeUpload struct {
+	XMLName xml.Name      `xml:"CompleteMultipartUpload"`
+	Parts   completeParts `xml:"Part"`
+}
+
+// completePart identifies one uploaded part in a complete request.
+type completePart struct {
+	PartNumber int
+	ETag       string
+}
+
+// completeParts implements sort.Interface, ordering parts by part number
+// as the CompleteMultipartUpload API requires.
+type completeParts []completePart
+
+func (p completeParts) Len() int           { return len(p) }
+func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
+func (p completeParts) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// Complete assembles the given previously uploaded parts into the
+// final object. This operation may take several minutes.
+//
+// See http://goo.gl/2Z7Tw for details.
+func (m *Multi) Complete(parts []Part) error {
+	params := map[string][]string{
+		"uploadId": {m.UploadId},
+	}
+	c := completeUpload{}
+	for _, p := range parts {
+		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
+	}
+	// Parts must be listed in ascending part-number order in the request.
+	sort.Sort(c.Parts)
+	data, err := xml.Marshal(&c)
+	if err != nil {
+		return err
+	}
+	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
+		req := &request{
+			method:  "POST",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			params:  params,
+			payload: bytes.NewReader(data),
+		}
+		err := m.Bucket.S3.query(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		return err
+	}
+	panic("unreachable")
+}
+
+// Abort deletes an unfinished multipart upload and any previously
+// uploaded parts for it.
+//
+// After a multipart upload is aborted, no additional parts can be
+// uploaded using it. However, if any part uploads are currently in
+// progress, those part uploads might or might not succeed. As a result,
+// it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
+//
+// NOTE: If the described scenario happens to you, please report back to
+// the goamz authors with details. In the future such retrying should be
+// handled internally, but it's not clear what happens precisely (Is an
+// error returned? Is the issue completely undetectable?).
+//
+// See http://goo.gl/dnyJw for details.
+func (m *Multi) Abort() error {
+	params := map[string][]string{
+		"uploadId": {m.UploadId},
+	}
+	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
+		req := &request{
+			method: "DELETE",
+			bucket: m.Bucket.Name,
+			path:   m.Key,
+			params: params,
+		}
+		err := m.Bucket.S3.query(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		return err
+	}
+	panic("unreachable")
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go
new file mode 100644
index 000000000..efab302d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go
@@ -0,0 +1,371 @@
+package s3_test
+
+import (
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "strings"
+
+ "github.com/goamz/goamz/s3"
+ . "gopkg.in/check.v1"
+)
+
+func (s *S) TestInitMulti(c *C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "POST")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
+ c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
+ c.Assert(req.Form["uploads"], DeepEquals, []string{""})
+
+ c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+}
+
+func (s *S) TestMultiNoPreviousUpload(c *C) {
+ // Don't retry the NoSuchUpload error.
+ s.DisableRetries()
+
+ testServer.Response(404, nil, NoSuchUploadErrorDump)
+ testServer.Response(200, nil, InitMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.Multi("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/")
+ c.Assert(req.Form["uploads"], DeepEquals, []string{""})
+ c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})
+
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "POST")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form["uploads"], DeepEquals, []string{""})
+
+ c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+}
+
+func (s *S) TestMultiReturnOld(c *C) {
+ testServer.Response(200, nil, ListMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.Multi("multi1", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+ c.Assert(multi.Key, Equals, "multi1")
+ c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/")
+ c.Assert(req.Form["uploads"], DeepEquals, []string{""})
+ c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"})
+}
+
+func (s *S) TestListParts(c *C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, nil, ListPartsResultDump1)
+ testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
+ testServer.Response(200, nil, ListPartsResultDump2)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ parts, err := multi.ListParts()
+ c.Assert(err, IsNil)
+ c.Assert(parts, HasLen, 3)
+ c.Assert(parts[0].N, Equals, 1)
+ c.Assert(parts[0].Size, Equals, int64(5))
+ c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
+ c.Assert(parts[1].N, Equals, 2)
+ c.Assert(parts[1].Size, Equals, int64(5))
+ c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
+ c.Assert(parts[2].N, Equals, 3)
+ c.Assert(parts[2].Size, Equals, int64(5))
+ c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
+
+ testServer.WaitRequest() // The internal error.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
+ c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"})
+}
+
+func (s *S) TestPutPart(c *C) {
+ headers := map[string]string{
+ "ETag": `"26f90efd10d614f100252ff56d88dad8"`,
+ }
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, headers, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
+ c.Assert(err, IsNil)
+ c.Assert(part.N, Equals, 1)
+ c.Assert(part.Size, Equals, int64(8))
+ c.Assert(part.ETag, Equals, headers["ETag"])
+
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "PUT")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
+ c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"})
+ c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
+}
+
+func readAll(r io.Reader) string {
+ data, err := ioutil.ReadAll(r)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
+func (s *S) TestPutAllNoPreviousUpload(c *C) {
+ // Don't retry the NoSuchUpload error.
+ s.DisableRetries()
+
+ etag1 := map[string]string{"ETag": `"etag1"`}
+ etag2 := map[string]string{"ETag": `"etag2"`}
+ etag3 := map[string]string{"ETag": `"etag3"`}
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(404, nil, NoSuchUploadErrorDump)
+ testServer.Response(200, etag1, "")
+ testServer.Response(200, etag2, "")
+ testServer.Response(200, etag3, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
+ c.Assert(parts, HasLen, 3)
+ c.Assert(parts[0].ETag, Equals, `"etag1"`)
+ c.Assert(parts[1].ETag, Equals, `"etag2"`)
+ c.Assert(parts[2].ETag, Equals, `"etag3"`)
+ c.Assert(err, IsNil)
+
+ // Init
+ testServer.WaitRequest()
+
+ // List old parts. Won't find anything.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+
+ // Send part 1.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "PUT")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
+ c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
+ c.Assert(readAll(req.Body), Equals, "part1")
+
+ // Send part 2.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "PUT")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
+ c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
+ c.Assert(readAll(req.Body), Equals, "part2")
+
+ // Send part 3 with shorter body.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "PUT")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
+ c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
+ c.Assert(readAll(req.Body), Equals, "last")
+}
+
+func (s *S) TestPutAllZeroSizeFile(c *C) {
+ // Don't retry the NoSuchUpload error.
+ s.DisableRetries()
+
+ etag1 := map[string]string{"ETag": `"etag1"`}
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(404, nil, NoSuchUploadErrorDump)
+ testServer.Response(200, etag1, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ // Must send at least one part, so that completing it will work.
+ parts, err := multi.PutAll(strings.NewReader(""), 5)
+ c.Assert(parts, HasLen, 1)
+ c.Assert(parts[0].ETag, Equals, `"etag1"`)
+ c.Assert(err, IsNil)
+
+ // Init
+ testServer.WaitRequest()
+
+ // List old parts. Won't find anything.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+
+ // Send empty part.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "PUT")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
+ c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
+ c.Assert(readAll(req.Body), Equals, "")
+}
+
+func (s *S) TestPutAllResume(c *C) {
+ etag2 := map[string]string{"ETag": `"etag2"`}
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, nil, ListPartsResultDump1)
+ testServer.Response(200, nil, ListPartsResultDump2)
+ testServer.Response(200, etag2, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ // "part1" and "part3" match the checksums in ResultDump1.
+ // The middle one is a mismatch (it refers to "part2").
+ parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
+ c.Assert(parts, HasLen, 3)
+ c.Assert(parts[0].N, Equals, 1)
+ c.Assert(parts[0].Size, Equals, int64(5))
+ c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
+ c.Assert(parts[1].N, Equals, 2)
+ c.Assert(parts[1].Size, Equals, int64(5))
+ c.Assert(parts[1].ETag, Equals, `"etag2"`)
+ c.Assert(parts[2].N, Equals, 3)
+ c.Assert(parts[2].Size, Equals, int64(5))
+ c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
+ c.Assert(err, IsNil)
+
+ // Init
+ testServer.WaitRequest()
+
+ // List old parts, broken in two requests.
+ for i := 0; i < 2; i++ {
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ }
+
+ // Send part 2, as it didn't match the checksum.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "PUT")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
+ c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
+ c.Assert(readAll(req.Body), Equals, "partX")
+}
+
+func (s *S) TestMultiComplete(c *C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ // Note the 200 response. Completing will hold the connection on some
+ // kind of long poll, and may return a late error even after a 200.
+ testServer.Response(200, nil, InternalErrorDump)
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
+ c.Assert(err, IsNil)
+
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "POST")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+
+ var payload struct {
+ XMLName xml.Name
+ Part []struct {
+ PartNumber int
+ ETag string
+ }
+ }
+
+ dec := xml.NewDecoder(req.Body)
+ err = dec.Decode(&payload)
+ c.Assert(err, IsNil)
+
+ c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
+ c.Assert(len(payload.Part), Equals, 2)
+ c.Assert(payload.Part[0].PartNumber, Equals, 1)
+ c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
+ c.Assert(payload.Part[1].PartNumber, Equals, 2)
+ c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
+}
+
+func (s *S) TestMultiAbort(c *C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+ c.Assert(err, IsNil)
+
+ err = multi.Abort()
+ c.Assert(err, IsNil)
+
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "DELETE")
+ c.Assert(req.URL.Path, Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+}
+
+func (s *S) TestListMulti(c *C) {
+ testServer.Response(200, nil, ListMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multis, prefixes, err := b.ListMulti("", "/")
+ c.Assert(err, IsNil)
+ c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
+ c.Assert(multis, HasLen, 2)
+ c.Assert(multis[0].Key, Equals, "multi1")
+ c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
+ c.Assert(multis[1].Key, Equals, "multi2")
+ c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "GET")
+ c.Assert(req.URL.Path, Equals, "/sample/")
+ c.Assert(req.Form["uploads"], DeepEquals, []string{""})
+ c.Assert(req.Form["prefix"], DeepEquals, []string{""})
+ c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
+ c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go
new file mode 100644
index 000000000..414ede0a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go
@@ -0,0 +1,202 @@
+package s3_test
+
+var GetObjectErrorDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>NoSuchBucket</Code>
+ <Message>The specified bucket does not exist</Message>
+ <BucketName>non-existent-bucket</BucketName>
+ <RequestId>3F1B667FAD71C3D8</RequestId>
+ <HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId>
+</Error>
+`
+
+var GetListResultDump1 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
+ <Name>quotes</Name>
+ <Prefix>N</Prefix>
+ <IsTruncated>false</IsTruncated>
+ <Contents>
+ <Key>Nelson</Key>
+ <LastModified>2006-01-01T12:00:00.000Z</LastModified>
+ <ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
+ <Size>5</Size>
+ <StorageClass>STANDARD</StorageClass>
+ <Owner>
+ <ID>bcaf161ca5fb16fd081034f</ID>
+ <DisplayName>webfile</DisplayName>
+ </Owner>
+ </Contents>
+ <Contents>
+ <Key>Neo</Key>
+ <LastModified>2006-01-01T12:00:00.000Z</LastModified>
+ <ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
+ <Size>4</Size>
+ <StorageClass>STANDARD</StorageClass>
+ <Owner>
+ <ID>bcaf1ffd86a5fb16fd081034f</ID>
+ <DisplayName>webfile</DisplayName>
+ </Owner>
+ </Contents>
+</ListBucketResult>
+`
+
+var GetListResultDump2 = `
+<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Name>example-bucket</Name>
+ <Prefix>photos/2006/</Prefix>
+ <Marker>some-marker</Marker>
+ <MaxKeys>1000</MaxKeys>
+ <Delimiter>/</Delimiter>
+ <IsTruncated>false</IsTruncated>
+
+ <CommonPrefixes>
+ <Prefix>photos/2006/feb/</Prefix>
+ </CommonPrefixes>
+ <CommonPrefixes>
+ <Prefix>photos/2006/jan/</Prefix>
+ </CommonPrefixes>
+</ListBucketResult>
+`
+
+var InitMultiResultDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Bucket>sample</Bucket>
+ <Key>multi</Key>
+ <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
+</InitiateMultipartUploadResult>
+`
+
+var ListPartsResultDump1 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Bucket>sample</Bucket>
+ <Key>multi</Key>
+ <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
+ <Initiator>
+ <ID>bb5c0f63b0b25f2d099c</ID>
+ <DisplayName>joe</DisplayName>
+ </Initiator>
+ <Owner>
+ <ID>bb5c0f63b0b25f2d099c</ID>
+ <DisplayName>joe</DisplayName>
+ </Owner>
+ <StorageClass>STANDARD</StorageClass>
+ <PartNumberMarker>0</PartNumberMarker>
+ <NextPartNumberMarker>2</NextPartNumberMarker>
+ <MaxParts>2</MaxParts>
+ <IsTruncated>true</IsTruncated>
+ <Part>
+ <PartNumber>1</PartNumber>
+ <LastModified>2013-01-30T13:45:51.000Z</LastModified>
+ <ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
+ <Size>5</Size>
+ </Part>
+ <Part>
+ <PartNumber>2</PartNumber>
+ <LastModified>2013-01-30T13:45:52.000Z</LastModified>
+ <ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
+ <Size>5</Size>
+ </Part>
+</ListPartsResult>
+`
+
+var ListPartsResultDump2 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Bucket>sample</Bucket>
+ <Key>multi</Key>
+ <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
+ <Initiator>
+ <ID>bb5c0f63b0b25f2d099c</ID>
+ <DisplayName>joe</DisplayName>
+ </Initiator>
+ <Owner>
+ <ID>bb5c0f63b0b25f2d099c</ID>
+ <DisplayName>joe</DisplayName>
+ </Owner>
+ <StorageClass>STANDARD</StorageClass>
+ <PartNumberMarker>2</PartNumberMarker>
+ <NextPartNumberMarker>3</NextPartNumberMarker>
+ <MaxParts>2</MaxParts>
+ <IsTruncated>false</IsTruncated>
+ <Part>
+ <PartNumber>3</PartNumber>
+ <LastModified>2013-01-30T13:46:50.000Z</LastModified>
+ <ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
+ <Size>5</Size>
+ </Part>
+</ListPartsResult>
+`
+
+var ListMultiResultDump = `
+<?xml version="1.0"?>
+<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
+ <KeyMarker/>
+ <UploadIdMarker/>
+ <NextKeyMarker>multi1</NextKeyMarker>
+ <NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
+ <Delimiter>/</Delimiter>
+ <MaxUploads>1000</MaxUploads>
+ <IsTruncated>false</IsTruncated>
+ <Upload>
+ <Key>multi1</Key>
+ <UploadId>iUVug89pPvSswrikD</UploadId>
+ <Initiator>
+ <ID>bb5c0f63b0b25f2d0</ID>
+ <DisplayName>gustavoniemeyer</DisplayName>
+ </Initiator>
+ <Owner>
+ <ID>bb5c0f63b0b25f2d0</ID>
+ <DisplayName>gustavoniemeyer</DisplayName>
+ </Owner>
+ <StorageClass>STANDARD</StorageClass>
+ <Initiated>2013-01-30T18:15:47.000Z</Initiated>
+ </Upload>
+ <Upload>
+ <Key>multi2</Key>
+ <UploadId>DkirwsSvPp98guVUi</UploadId>
+ <Initiator>
+ <ID>bb5c0f63b0b25f2d0</ID>
+ <DisplayName>joe</DisplayName>
+ </Initiator>
+ <Owner>
+ <ID>bb5c0f63b0b25f2d0</ID>
+ <DisplayName>joe</DisplayName>
+ </Owner>
+ <StorageClass>STANDARD</StorageClass>
+ <Initiated>2013-01-30T18:15:47.000Z</Initiated>
+ </Upload>
+ <CommonPrefixes>
+ <Prefix>a/</Prefix>
+ </CommonPrefixes>
+ <CommonPrefixes>
+ <Prefix>b/</Prefix>
+ </CommonPrefixes>
+</ListMultipartUploadsResult>
+`
+
+var NoSuchUploadErrorDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>NoSuchUpload</Code>
+ <Message>Not relevant</Message>
+ <BucketName>sample</BucketName>
+ <RequestId>3F1B667FAD71C3D8</RequestId>
+ <HostId>kjhwqk</HostId>
+</Error>
+`
+
+var InternalErrorDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>InternalError</Code>
+ <Message>Not relevant</Message>
+ <BucketName>sample</BucketName>
+ <RequestId>3F1B667FAD71C3D8</RequestId>
+ <HostId>kjhwqk</HostId>
+</Error>
+`
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go
new file mode 100644
index 000000000..88ef975d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go
@@ -0,0 +1,1151 @@
+//
+// goamz - Go packages to interact with the Amazon Web Services.
+//
+// https://wiki.ubuntu.com/goamz
+//
+// Copyright (c) 2011 Canonical Ltd.
+//
+// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
+//
+
+package s3
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "github.com/goamz/goamz/aws"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const debug = false
+
+// The S3 type encapsulates operations with an S3 region.
+type S3 struct {
+ aws.Auth
+ aws.Region
+
+ // ConnectTimeout is the maximum time a request attempt will
+ // wait for a successful connection to be made.
+ //
+ // A value of zero means no timeout.
+ ConnectTimeout time.Duration
+
+ // ReadTimeout is the maximum time a request attempt will wait
+ // for an individual read to complete.
+ //
+ // A value of zero means no timeout.
+ ReadTimeout time.Duration
+
+ // WriteTimeout is the maximum time a request attempt will
+ // wait for an individual write to complete.
+ //
+ // A value of zero means no timeout.
+ WriteTimeout time.Duration
+
+ // RequestTimeout is the maximum time a request attempt can
+ // take before operations return a timeout error.
+ //
+ // This includes connection time, any redirects, and reading
+ // the response body. The timer remains running after the request
+ // is made so it can interrupt reading of the response data.
+ //
+ // A Timeout of zero means no timeout.
+ RequestTimeout time.Duration
+
+ // AttemptStrategy is the attempt strategy used for requests.
+ aws.AttemptStrategy
+
+ // Reserve the right of using private data.
+ private byte
+
+ // client used for requests
+ client *http.Client
+}
+
+// The Bucket type encapsulates operations with an S3 bucket.
+type Bucket struct {
+ *S3
+ Name string
+}
+
+// The Owner type represents the owner of the object in an S3 bucket.
+type Owner struct {
+ ID string
+ DisplayName string
+}
+
+// Fold options into an Options struct
+//
+type Options struct {
+ SSE bool
+ Meta map[string][]string
+ ContentEncoding string
+ CacheControl string
+ RedirectLocation string
+ ContentMD5 string
+ // What else?
+ // Content-Disposition string
+ //// The following become headers so they are []strings rather than strings... I think
+ // x-amz-storage-class []string
+}
+
+type CopyOptions struct {
+ Options
+ MetadataDirective string
+ ContentType string
+}
+
+// CopyObjectResult is the output from a Copy request
+type CopyObjectResult struct {
+ ETag string
+ LastModified string
+}
+
+// DefaultAttemptStrategy is the default AttemptStrategy used by S3 objects created by New.
+var DefaultAttemptStrategy = aws.AttemptStrategy{
+ Min: 5,
+ Total: 5 * time.Second,
+ Delay: 200 * time.Millisecond,
+}
+
+// New creates a new S3. Optional client argument allows for custom http.clients to be used.
+func New(auth aws.Auth, region aws.Region, client ...*http.Client) *S3 {
+
+ var httpclient *http.Client
+
+ if len(client) > 0 {
+ httpclient = client[0]
+ }
+
+ return &S3{Auth: auth, Region: region, AttemptStrategy: DefaultAttemptStrategy, client: httpclient}
+}
+
+// Bucket returns a Bucket with the given name.
+func (s3 *S3) Bucket(name string) *Bucket {
+ if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
+ name = strings.ToLower(name)
+ }
+ return &Bucket{s3, name}
+}
+
+var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <LocationConstraint>%s</LocationConstraint>
+</CreateBucketConfiguration>`
+
+// locationConstraint returns an io.Reader specifying a LocationConstraint if
+// required for the region.
+//
+// See http://goo.gl/bh9Kq for details.
+func (s3 *S3) locationConstraint() io.Reader {
+ constraint := ""
+ if s3.Region.S3LocationConstraint {
+ constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
+ }
+ return strings.NewReader(constraint)
+}
+
+type ACL string
+
+const (
+ Private = ACL("private")
+ PublicRead = ACL("public-read")
+ PublicReadWrite = ACL("public-read-write")
+ AuthenticatedRead = ACL("authenticated-read")
+ BucketOwnerRead = ACL("bucket-owner-read")
+ BucketOwnerFull = ACL("bucket-owner-full-control")
+)
+
+// PutBucket creates a new bucket.
+//
+// See http://goo.gl/ndjnR for details.
+func (b *Bucket) PutBucket(perm ACL) error {
+ headers := map[string][]string{
+ "x-amz-acl": {string(perm)},
+ }
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: "/",
+ headers: headers,
+ payload: b.locationConstraint(),
+ }
+ return b.S3.query(req, nil)
+}
+
+// DelBucket removes an existing S3 bucket. All objects in the bucket must
+// be removed before the bucket itself can be removed.
+//
+// See http://goo.gl/GoBrY for details.
+func (b *Bucket) DelBucket() (err error) {
+ req := &request{
+ method: "DELETE",
+ bucket: b.Name,
+ path: "/",
+ }
+ for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+ err = b.S3.query(req, nil)
+ if !shouldRetry(err) {
+ break
+ }
+ }
+ return err
+}
+
+// Get retrieves an object from an S3 bucket.
+//
+// See http://goo.gl/isCO7 for details.
+func (b *Bucket) Get(path string) (data []byte, err error) {
+ body, err := b.GetReader(path)
+ defer func() {
+ if body != nil {
+ body.Close()
+ }
+ }()
+ if err != nil {
+ return nil, err
+ }
+ data, err = ioutil.ReadAll(body)
+ return data, err
+}
+
+// GetReader retrieves an object from an S3 bucket,
+// returning the body of the HTTP response.
+// It is the caller's responsibility to call Close on rc when
+// finished reading.
+func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
+ resp, err := b.GetResponse(path)
+ if resp != nil {
+ return resp.Body, err
+ }
+ return nil, err
+}
+
+// GetResponse retrieves an object from an S3 bucket,
+// returning the HTTP response.
+// It is the caller's responsibility to call Close on rc when
+// finished reading
+func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
+ return b.GetResponseWithHeaders(path, make(http.Header))
+}
+
+// GetResponseWithHeaders retrieves an object from an S3 bucket
+// Accepts custom headers to be sent as the second parameter
+// returning the body of the HTTP response.
+// It is the caller's responsibility to call Close on rc when
+// finished reading
+func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) {
+ req := &request{
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ }
+ err = b.S3.prepare(req)
+ if err != nil {
+ return nil, err
+ }
+ for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+ resp, err := b.S3.run(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+ }
+ panic("unreachable")
+}
+
+// Exists checks whether or not an object exists on an S3 bucket using a HEAD request.
+func (b *Bucket) Exists(path string) (exists bool, err error) {
+ req := &request{
+ method: "HEAD",
+ bucket: b.Name,
+ path: path,
+ }
+ err = b.S3.prepare(req)
+ if err != nil {
+ return
+ }
+ for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+ resp, err := b.S3.run(req, nil)
+
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+
+ if err != nil {
+			// We can treat a 403 or 404 as non-existence
+ if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ if resp.StatusCode/100 == 2 {
+ exists = true
+ }
+ return exists, err
+ }
+ return false, fmt.Errorf("S3 Currently Unreachable")
+}
+
+// Head HEADs an object in the S3 bucket, returns the response with
+// no body. See http://bit.ly/17K1ylI
+func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
+ req := &request{
+ method: "HEAD",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ }
+ err := b.S3.prepare(req)
+ if err != nil {
+ return nil, err
+ }
+
+ for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+ resp, err := b.S3.run(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+ }
+ return nil, fmt.Errorf("S3 Currently Unreachable")
+}
+
+// Put inserts an object into the S3 bucket.
+//
+// See http://goo.gl/FEBPD for details.
+func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
+ body := bytes.NewBuffer(data)
+ return b.PutReader(path, body, int64(len(data)), contType, perm, options)
+}
+
+// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key
+func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) {
+ headers := map[string][]string{
+ "x-amz-acl": {string(perm)},
+ "x-amz-copy-source": {source},
+ }
+ options.addHeaders(headers)
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ }
+ resp := &CopyObjectResult{}
+ err := b.S3.query(req, resp)
+ if err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+/*
+PutHeader - like Put, inserts an object into the S3 bucket.
+Instead of Content-Type string, pass in custom headers to override defaults.
+*/
+func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
+ body := bytes.NewBuffer(data)
+ return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)
+}
+
+// PutReader inserts an object into the S3 bucket by consuming data
+// from r until EOF.
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(length, 10)},
+ "Content-Type": {contType},
+ "x-amz-acl": {string(perm)},
+ }
+ options.addHeaders(headers)
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ payload: r,
+ }
+ return b.S3.query(req, nil)
+}
+
+/*
+PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
+Instead of Content-Type string, pass in custom headers to override defaults.
+*/
+func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {
+ // Default headers
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(length, 10)},
+ "Content-Type": {"application/text"},
+ "x-amz-acl": {string(perm)},
+ }
+
+ // Override with custom headers
+ for key, value := range customHeaders {
+ headers[key] = value
+ }
+
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ payload: r,
+ }
+ return b.S3.query(req, nil)
+}
+
+// addHeaders adds o's specified fields to headers
+func (o Options) addHeaders(headers map[string][]string) {
+ if o.SSE {
+ headers["x-amz-server-side-encryption"] = []string{"AES256"}
+ }
+ if len(o.ContentEncoding) != 0 {
+ headers["Content-Encoding"] = []string{o.ContentEncoding}
+ }
+ if len(o.CacheControl) != 0 {
+ headers["Cache-Control"] = []string{o.CacheControl}
+ }
+ if len(o.ContentMD5) != 0 {
+ headers["Content-MD5"] = []string{o.ContentMD5}
+ }
+ if len(o.RedirectLocation) != 0 {
+ headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation}
+ }
+ for k, v := range o.Meta {
+ headers["x-amz-meta-"+k] = v
+ }
+}
+
+// addHeaders adds o's specified fields to headers
+func (o CopyOptions) addHeaders(headers map[string][]string) {
+ o.Options.addHeaders(headers)
+ if len(o.MetadataDirective) != 0 {
+ headers["x-amz-metadata-directive"] = []string{o.MetadataDirective}
+ }
+ if len(o.ContentType) != 0 {
+ headers["Content-Type"] = []string{o.ContentType}
+ }
+}
+
+func makeXmlBuffer(doc []byte) *bytes.Buffer {
+ buf := new(bytes.Buffer)
+ buf.WriteString(xml.Header)
+ buf.Write(doc)
+ return buf
+}
+
+type RoutingRule struct {
+ ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"`
+ RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
+ RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"`
+}
+
+type WebsiteConfiguration struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"`
+ IndexDocumentSuffix string `xml:"IndexDocument>Suffix"`
+ ErrorDocumentKey string `xml:"ErrorDocument>Key"`
+ RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
+}
+
+func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
+
+ doc, err := xml.Marshal(configuration)
+ if err != nil {
+ return err
+ }
+
+ buf := makeXmlBuffer(doc)
+
+ return b.PutBucketSubresource("website", buf, int64(buf.Len()))
+}
+
+func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error {
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(length, 10)},
+ }
+ req := &request{
+ path: "/",
+ method: "PUT",
+ bucket: b.Name,
+ headers: headers,
+ payload: r,
+ params: url.Values{subresource: {""}},
+ }
+
+ return b.S3.query(req, nil)
+}
+
+// Del removes an object from the S3 bucket.
+//
+// See http://goo.gl/APeTt for details.
+func (b *Bucket) Del(path string) error {
+ req := &request{
+ method: "DELETE",
+ bucket: b.Name,
+ path: path,
+ }
+ return b.S3.query(req, nil)
+}
+
// Delete is the request document for DelMulti (multi-object delete).
type Delete struct {
	// Quiet suppresses per-object results in the response.
	Quiet   bool     `xml:"Quiet,omitempty"`
	Objects []Object `xml:"Object"`
}

// Object identifies one object (and optionally a specific version of
// it) to be removed by DelMulti.
type Object struct {
	Key       string `xml:"Key"`
	VersionId string `xml:"VersionId,omitempty"`
}
+
+// DelMulti removes up to 1000 objects from the S3 bucket.
+//
+// See http://goo.gl/jx6cWK for details.
+func (b *Bucket) DelMulti(objects Delete) error {
+ doc, err := xml.Marshal(objects)
+ if err != nil {
+ return err
+ }
+
+ buf := makeXmlBuffer(doc)
+ digest := md5.New()
+ size, err := digest.Write(buf.Bytes())
+ if err != nil {
+ return err
+ }
+
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(int64(size), 10)},
+ "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
+ "Content-Type": {"text/xml"},
+ }
+ req := &request{
+ path: "/",
+ method: "POST",
+ params: url.Values{"delete": {""}},
+ bucket: b.Name,
+ headers: headers,
+ payload: buf,
+ }
+
+ return b.S3.query(req, nil)
+}
+
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
	Name       string // bucket name
	Prefix     string // prefix the listing was restricted to, if any
	Delimiter  string // delimiter used for CommonPrefixes grouping
	Marker     string // key the listing started after
	NextMarker string // marker to pass to continue a truncated listing
	MaxKeys    int

	// IsTruncated is true if the results have been truncated because
	// there are more keys and prefixes than can fit in MaxKeys.
	// N.B. this is the opposite sense to that documented (incorrectly) in
	// http://goo.gl/YjQTc
	IsTruncated    bool
	Contents       []Key
	CommonPrefixes []string `xml:">Prefix"`
}

// The Key type represents an item stored in an S3 bucket.
type Key struct {
	Key          string
	LastModified string
	Size         int64
	// ETag gives the hex-encoded MD5 sum of the contents,
	// surrounded with double-quotes.
	ETag         string
	StorageClass string
	Owner        Owner
}
+
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
//     index.html
//     index2.html
//     photos/2006/January/sample.jpg
//     photos/2006/February/sample2.jpg
//     photos/2006/February/sample3.jpg
//     photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
//     &ListResp{
//         Name:      "sample-bucket",
//         MaxKeys:   1000,
//         Delimiter: "/",
//         Contents:  []Key{
//             {Key: "index.html"},
//             {Key: "index2.html"},
//         },
//         CommonPrefixes: []string{
//             "photos/",
//         },
//     }
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
//     &ListResp{
//         Name:      "sample-bucket",
//         MaxKeys:   1000,
//         Delimiter: "/",
//         Prefix:    "photos/2006/",
//         CommonPrefixes: []string{
//             "photos/2006/February/",
//             "photos/2006/January/",
//         },
//     }
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
	params := map[string][]string{
		"prefix":    {prefix},
		"delimiter": {delim},
		"marker":    {marker},
	}
	if max != 0 {
		params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
	}
	req := &request{
		bucket: b.Name,
		params: params,
	}
	result = &ListResp{}
	// Retry transient failures per the client's AttemptStrategy.
	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
		err = b.S3.query(req, result)
		if !shouldRetry(err) {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	return result, nil
}
+
// The VersionsResp type holds the results of a list bucket Versions operation.
type VersionsResp struct {
	Name            string
	Prefix          string
	KeyMarker       string
	VersionIdMarker string
	MaxKeys         int
	Delimiter       string
	IsTruncated     bool
	Versions        []Version
	CommonPrefixes  []string `xml:">Prefix"`
}

// The Version type represents an object version stored in an S3 bucket.
type Version struct {
	Key          string
	VersionId    string
	IsLatest     bool
	LastModified string
	// ETag gives the hex-encoded MD5 sum of the contents,
	// surrounded with double-quotes.
	ETag         string
	Size         int64
	Owner        Owner
	StorageClass string
}
+
+func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
+ params := map[string][]string{
+ "versions": {""},
+ "prefix": {prefix},
+ "delimiter": {delim},
+ }
+
+ if len(versionIdMarker) != 0 {
+ params["version-id-marker"] = []string{versionIdMarker}
+ }
+ if len(keyMarker) != 0 {
+ params["key-marker"] = []string{keyMarker}
+ }
+
+ if max != 0 {
+ params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
+ }
+ req := &request{
+ bucket: b.Name,
+ params: params,
+ }
+ result = &VersionsResp{}
+ for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+ err = b.S3.query(req, result)
+ if !shouldRetry(err) {
+ break
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// Returns a mapping of all key names in this bucket to Key objects
+func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
+ bucket_contents := map[string]Key{}
+ prefix := ""
+ path_separator := ""
+ marker := ""
+ for {
+ contents, err := b.List(prefix, path_separator, marker, 1000)
+ if err != nil {
+ return &bucket_contents, err
+ }
+ for _, key := range contents.Contents {
+ bucket_contents[key.Key] = key
+ }
+ if contents.IsTruncated {
+ marker = contents.NextMarker
+ } else {
+ break
+ }
+ }
+
+ return &bucket_contents, nil
+}
+
+// URL returns a non-signed URL that allows retriving the
+// object at path. It only works if the object is publicly
+// readable (see SignedURL).
+func (b *Bucket) URL(path string) string {
+ req := &request{
+ bucket: b.Name,
+ path: path,
+ }
+ err := b.S3.prepare(req)
+ if err != nil {
+ panic(err)
+ }
+ u, err := req.url()
+ if err != nil {
+ panic(err)
+ }
+ u.RawQuery = ""
+ return u.String()
+}
+
+// SignedURL returns a signed URL that allows anyone holding the URL
+// to retrieve the object at path. The signature is valid until expires.
+func (b *Bucket) SignedURL(path string, expires time.Time) string {
+ req := &request{
+ bucket: b.Name,
+ path: path,
+ params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}},
+ }
+ err := b.S3.prepare(req)
+ if err != nil {
+ panic(err)
+ }
+ u, err := req.url()
+ if err != nil {
+ panic(err)
+ }
+ if b.S3.Auth.Token() != "" {
+ return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
+ } else {
+ return u.String()
+ }
+}
+
+// UploadSignedURL returns a signed URL that allows anyone holding the URL
+// to upload the object at path. The signature is valid until expires.
+// contenttype is a string like image/png
+// path is the resource name in s3 terminalogy like images/ali.png [obviously exclusing the bucket name itself]
+func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string {
+ expire_date := expires.Unix()
+ if method != "POST" {
+ method = "PUT"
+ }
+ stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n/" + b.Name + "/" + path
+ fmt.Println("String to sign:\n", stringToSign)
+ a := b.S3.Auth
+ secretKey := a.SecretKey
+ accessId := a.AccessKey
+ mac := hmac.New(sha1.New, []byte(secretKey))
+ mac.Write([]byte(stringToSign))
+ macsum := mac.Sum(nil)
+ signature := base64.StdEncoding.EncodeToString([]byte(macsum))
+ signature = strings.TrimSpace(signature)
+
+ signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/")
+ if err != nil {
+ log.Println("ERROR sining url for S3 upload", err)
+ return ""
+ }
+ signedurl.Path += path
+ params := url.Values{}
+ params.Add("AWSAccessKeyId", accessId)
+ params.Add("Expires", strconv.FormatInt(expire_date, 10))
+ params.Add("Signature", signature)
+ if a.Token() != "" {
+ params.Add("token", a.Token())
+ }
+
+ signedurl.RawQuery = params.Encode()
+ return signedurl.String()
+}
+
+// PostFormArgs returns the action and input fields needed to allow anonymous
+// uploads to a bucket within the expiration limit
+func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
+ conditions := make([]string, 0)
+ fields = map[string]string{
+ "AWSAccessKeyId": b.Auth.AccessKey,
+ "key": path,
+ }
+
+ conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
+ conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
+ if redirect != "" {
+ conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
+ fields["success_action_redirect"] = redirect
+ }
+
+ vExpiration := expires.Format("2006-01-02T15:04:05Z")
+ vConditions := strings.Join(conditions, ",")
+ policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
+ policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
+ fields["policy"] = policy64
+
+ signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey))
+ signer.Write([]byte(policy64))
+ fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
+
+ action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name)
+ return
+}
+
// request describes one S3 HTTP request as it is built up by prepare
// and executed by run.
type request struct {
	method   string     // HTTP verb; defaults to GET in prepare
	bucket   string     // bucket name, or "" for service-level requests
	path     string     // object/resource path, "/"-prefixed by prepare
	signpath string     // path used for signing (always bucket-qualified)
	params   url.Values // query parameters
	headers  http.Header
	baseurl  string    // endpoint URL chosen by prepare
	payload  io.Reader // request body, if any
	prepared bool      // one-time setup in prepare already done
}
+
+func (req *request) url() (*url.URL, error) {
+ u, err := url.Parse(req.baseurl)
+ if err != nil {
+ return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
+ }
+ u.RawQuery = req.params.Encode()
+ u.Path = req.path
+ return u, nil
+}
+
+// query prepares and runs the req request.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled on it.
+func (s3 *S3) query(req *request, resp interface{}) error {
+ err := s3.prepare(req)
+ if err == nil {
+ var httpResponse *http.Response
+ httpResponse, err = s3.run(req, resp)
+ if resp == nil && httpResponse != nil {
+ httpResponse.Body.Close()
+ }
+ }
+ return err
+}
+
// prepare sets up req to be delivered to S3.
//
// One-time setup (guarded by req.prepared) fills in the method,
// copies params/headers, normalizes the path and picks the endpoint;
// signing is redone on every call because a retry needs a fresh Date
// header and signature.
func (s3 *S3) prepare(req *request) error {
	var signpath = req.path

	if !req.prepared {
		req.prepared = true
		if req.method == "" {
			req.method = "GET"
		}
		// Copy so they can be mutated without affecting on retries.
		params := make(url.Values)
		headers := make(http.Header)
		for k, v := range req.params {
			params[k] = v
		}
		for k, v := range req.headers {
			headers[k] = v
		}
		req.params = params
		req.headers = headers
		if !strings.HasPrefix(req.path, "/") {
			req.path = "/" + req.path
		}
		signpath = req.path
		if req.bucket != "" {
			req.baseurl = s3.Region.S3BucketEndpoint
			if req.baseurl == "" {
				// Use the path method to address the bucket.
				req.baseurl = s3.Region.S3Endpoint
				req.path = "/" + req.bucket + req.path
			} else {
				// Just in case, prevent injection.
				if strings.IndexAny(req.bucket, "/:@") >= 0 {
					return fmt.Errorf("bad S3 bucket: %q", req.bucket)
				}
				req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
			}
			// The signature always covers the bucket-qualified path,
			// regardless of the addressing style chosen above.
			signpath = "/" + req.bucket + signpath
		}
	}

	// Always sign again as it's not clear how far the
	// server has handled a previous attempt.
	u, err := url.Parse(req.baseurl)
	if err != nil {
		return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
	}
	// Re-encode the sign path via url.URL so characters such as
	// spaces are escaped the same way the server will see them.
	reqSignpathSpaceFix := (&url.URL{Path: signpath}).String()
	req.headers["Host"] = []string{u.Host}
	req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
	if s3.Auth.Token() != "" {
		req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
	}
	sign(s3.Auth, req.method, reqSignpathSpaceFix, req.params, req.headers)
	return nil
}
+
// run sends req and returns the http response from the server.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it.
//
// Status codes other than 200, 204 and 206 are converted into a
// *Error via buildError. When resp is nil and no error occurs, the
// caller owns (and must close) the returned body.
func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
	if debug {
		log.Printf("Running S3 request: %#v", req)
	}

	u, err := req.url()
	if err != nil {
		return nil, err
	}

	hreq := http.Request{
		URL:        u,
		Method:     req.method,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Close:      true,
		Header:     req.headers,
	}

	// Promote Content-Length from the signed headers to the request
	// field; net/http manages this header itself.
	if v, ok := req.headers["Content-Length"]; ok {
		hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
		delete(req.headers, "Content-Length")
	}
	if req.payload != nil {
		hreq.Body = ioutil.NopCloser(req.payload)
	}

	// Lazily build an HTTP client whose dialer applies the configured
	// connect/request/read/write timeouts.
	if s3.client == nil {
		s3.client = &http.Client{
			Transport: &http.Transport{
				Dial: func(netw, addr string) (c net.Conn, err error) {
					c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
					if err != nil {
						return
					}

					var deadline time.Time
					if s3.RequestTimeout > 0 {
						deadline = time.Now().Add(s3.RequestTimeout)
						c.SetDeadline(deadline)
					}

					// Per-operation deadlines are layered on via the
					// ioTimeoutConn wrapper.
					if s3.ReadTimeout > 0 || s3.WriteTimeout > 0 {
						c = &ioTimeoutConn{
							TCPConn:         c.(*net.TCPConn),
							readTimeout:     s3.ReadTimeout,
							writeTimeout:    s3.WriteTimeout,
							requestDeadline: deadline,
						}
					}
					return
				},
			},
		}
	}

	hresp, err := s3.client.Do(&hreq)
	if err != nil {
		return nil, err
	}
	if debug {
		dump, _ := httputil.DumpResponse(hresp, true)
		log.Printf("} -> %s\n", dump)
	}
	if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
		defer hresp.Body.Close()
		return nil, buildError(hresp)
	}
	if resp != nil {
		err = xml.NewDecoder(hresp.Body).Decode(resp)
		hresp.Body.Close()
		if debug {
			log.Printf("goamz.s3> decoded xml into %#v", resp)
		}
	}
	return hresp, err
}
+
// Error represents an error in an operation with S3.
type Error struct {
	StatusCode int    // HTTP status code (200, 403, ...)
	Code       string // S3 error code ("UnsupportedOperation", ...)
	Message    string // The human-oriented error message
	BucketName string
	RequestId  string
	HostId     string
}

// Error implements the error interface, returning the S3 message.
func (e *Error) Error() string {
	return e.Message
}
+
// buildError decodes the S3 XML error document in r's body into a
// *Error, falling back to the HTTP status line when the body carries
// no message. The body is consumed and closed.
func buildError(r *http.Response) error {
	if debug {
		log.Printf("got error (status code %v)", r.StatusCode)
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			log.Printf("\tread error: %v", err)
		} else {
			log.Printf("\tdata:\n%s\n\n", data)
		}
		// Put the bytes back so the decode below still sees them.
		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
	}

	err := Error{}
	// TODO return error if Unmarshal fails?
	xml.NewDecoder(r.Body).Decode(&err)
	r.Body.Close()
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	if debug {
		log.Printf("err: %#v\n", err)
	}
	return &err
}
+
+func shouldRetry(err error) bool {
+ if err == nil {
+ return false
+ }
+ if e, ok := err.(*url.Error); ok {
+ // Transport returns this string if it detects a write on a connection which
+ // has already had an error
+ if e.Err.Error() == "http: can't write HTTP request on broken connection" {
+ return true
+ }
+ err = e.Err
+ }
+
+ switch err {
+ case io.ErrUnexpectedEOF, io.EOF:
+ return true
+ }
+ switch e := err.(type) {
+ case *net.DNSError:
+ return true
+ case *net.OpError:
+ switch e.Op {
+ case "read", "write", "WSARecv", "WSASend", "ConnectEx":
+ return true
+ }
+ case *Error:
+ switch e.Code {
+ case "InternalError", "NoSuchUpload", "NoSuchBucket":
+ return true
+ }
+ }
+ return false
+}
+
+func hasCode(err error, code string) bool {
+ s3err, ok := err.(*Error)
+ return ok && s3err.Code == code
+}
+
// ioTimeoutConn is a net.Conn which sets a deadline for each Read or Write operation
type ioTimeoutConn struct {
	*net.TCPConn
	readTimeout  time.Duration // per-Read deadline; 0 disables
	writeTimeout time.Duration // per-Write deadline; 0 disables
	// requestDeadline caps every per-operation deadline; zero means
	// no overall request deadline.
	requestDeadline time.Time
}
+
+func (c *ioTimeoutConn) deadline(timeout time.Duration) time.Time {
+ dl := time.Now().Add(timeout)
+ if c.requestDeadline.IsZero() || dl.Before(c.requestDeadline) {
+ return dl
+ }
+
+ return c.requestDeadline
+}
+
+func (c *ioTimeoutConn) Read(b []byte) (int, error) {
+ if c.readTimeout > 0 {
+ err := c.TCPConn.SetReadDeadline(c.deadline(c.readTimeout))
+ if err != nil {
+ return 0, err
+ }
+ }
+ return c.TCPConn.Read(b)
+}
+
+func (c *ioTimeoutConn) Write(b []byte) (int, error) {
+ if c.writeTimeout > 0 {
+ err := c.TCPConn.SetWriteDeadline(c.deadline(c.writeTimeout))
+ if err != nil {
+ return 0, err
+ }
+ }
+ return c.TCPConn.Write(b)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go
new file mode 100644
index 000000000..24d4dfcc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go
@@ -0,0 +1,427 @@
+package s3_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/goamz/goamz/aws"
+ "github.com/goamz/goamz/s3"
+ "github.com/goamz/goamz/testutil"
+ . "gopkg.in/check.v1"
+)
+
// Test hooks gocheck into the standard "go test" runner.
func Test(t *testing.T) {
	TestingT(t)
}

// S is the unit-test suite; it talks to a local fake S3 HTTP server
// rather than to AWS.
type S struct {
	s3 *s3.S3
}

var _ = Suite(&S{})

// testServer is the in-process HTTP server all suite requests hit.
var testServer = testutil.NewHTTPServer()

// SetUpSuite starts the fake server and points a client with dummy
// credentials at it.
func (s *S) SetUpSuite(c *C) {
	testServer.Start()
	auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
	s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
}

// TearDownSuite restores the default retry policy shortened in SetUpTest.
func (s *S) TearDownSuite(c *C) {
	s.s3.AttemptStrategy = s3.DefaultAttemptStrategy
}

// SetUpTest installs a short retry strategy so failing tests finish fast.
func (s *S) SetUpTest(c *C) {
	s.s3.AttemptStrategy = aws.AttemptStrategy{
		Total: 300 * time.Millisecond,
		Delay: 100 * time.Millisecond,
	}
}

// TearDownTest discards any queued responses/requests on the fake server.
func (s *S) TearDownTest(c *C) {
	testServer.Flush()
}

// DisableRetries makes the client fail immediately on error.
func (s *S) DisableRetries() {
	s.s3.AttemptStrategy = aws.AttemptStrategy{}
}
+
// PutBucket docs: http://goo.gl/kBTCu

// TestPutBucket checks that bucket creation issues PUT /bucket/.
func (s *S) TestPutBucket(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	err := b.PutBucket(s3.Private)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/")
	c.Assert(req.Header["Date"], Not(Equals), "")
}

// Head docs: http://bit.ly/17K1ylI

// TestHead checks that Head issues HEAD /bucket/name and returns the
// raw *http.Response.
func (s *S) TestHead(c *C) {
	testServer.Response(200, nil, "content")

	b := s.s3.Bucket("bucket")
	resp, err := b.Head("name", nil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "HEAD")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")

	c.Assert(err, IsNil)
	c.Assert(resp.ContentLength, FitsTypeOf, int64(0))
	c.Assert(resp, FitsTypeOf, &http.Response{})
}

// DeleteBucket docs: http://goo.gl/GoBrY

// TestDelBucket checks that bucket deletion issues DELETE /bucket/.
func (s *S) TestDelBucket(c *C) {
	testServer.Response(204, nil, "")

	b := s.s3.Bucket("bucket")
	err := b.DelBucket()
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "DELETE")
	c.Assert(req.URL.Path, Equals, "/bucket/")
	c.Assert(req.Header["Date"], Not(Equals), "")
}
+
// GetObject docs: http://goo.gl/isCO7

// TestGet checks that Get issues GET /bucket/name and returns the body.
func (s *S) TestGet(c *C) {
	testServer.Response(200, nil, "content")

	b := s.s3.Bucket("bucket")
	data, err := b.Get("name")

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")

	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "content")
}

// TestURL checks that the unsigned URL produced by Bucket.URL is
// directly fetchable.
func (s *S) TestURL(c *C) {
	testServer.Response(200, nil, "content")

	b := s.s3.Bucket("bucket")
	url := b.URL("name")
	r, err := http.Get(url)
	c.Assert(err, IsNil)
	data, err := ioutil.ReadAll(r.Body)
	r.Body.Close()
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "content")

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
}

// TestGetReader checks the streaming variant of Get.
func (s *S) TestGetReader(c *C) {
	testServer.Response(200, nil, "content")

	b := s.s3.Bucket("bucket")
	rc, err := b.GetReader("name")
	c.Assert(err, IsNil)
	data, err := ioutil.ReadAll(rc)
	rc.Close()
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "content")

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")
}

// TestGetNotFound checks that a 404 is surfaced as a populated
// *s3.Error. Ten responses are queued because the client retries.
func (s *S) TestGetNotFound(c *C) {
	for i := 0; i < 10; i++ {
		testServer.Response(404, nil, GetObjectErrorDump)
	}

	b := s.s3.Bucket("non-existent-bucket")
	data, err := b.Get("non-existent")

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent")
	c.Assert(req.Header["Date"], Not(Equals), "")

	s3err, _ := err.(*s3.Error)
	c.Assert(s3err, NotNil)
	c.Assert(s3err.StatusCode, Equals, 404)
	c.Assert(s3err.BucketName, Equals, "non-existent-bucket")
	c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8")
	c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
	c.Assert(s3err.Code, Equals, "NoSuchBucket")
	c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
	c.Assert(s3err.Error(), Equals, "The specified bucket does not exist")
	c.Assert(data, IsNil)
}
+
// PutObject docs: http://goo.gl/FEBPD

// TestPutObject checks the headers emitted by a basic Put.
func (s *S) TestPutObject(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}

// TestPutObjectReadTimeout checks that a configured ReadTimeout fails
// a Put when the server is silent, and that a timely response within
// the window succeeds.
func (s *S) TestPutObjectReadTimeout(c *C) {
	s.s3.ReadTimeout = 50 * time.Millisecond
	defer func() {
		s.s3.ReadTimeout = 0
	}()

	b := s.s3.Bucket("bucket")
	err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})

	// Make sure that we get a timeout error.
	c.Assert(err, NotNil)

	// Set the response after the request times out so that the next request will work.
	testServer.Response(200, nil, "")

	// This time set the response within our timeout period so that we expect the call
	// to return successfully.
	go func() {
		time.Sleep(25 * time.Millisecond)
		testServer.Response(200, nil, "")
	}()
	err = b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
	c.Assert(err, IsNil)
}

// TestPutObjectHeader checks that caller-supplied headers are passed
// through by PutHeader.
func (s *S) TestPutObjectHeader(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	err := b.PutHeader(
		"name",
		[]byte("content"),
		map[string][]string{"Content-Type": {"content-type"}},
		s3.Private,
	)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}

// TestPutReader checks the streaming upload path.
func (s *S) TestPutReader(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	buf := bytes.NewBufferString("content")
	err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private, s3.Options{})
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	//c.Assert(req.Header["Content-MD5"], Equals, "...")
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}

// TestPutReaderHeader checks the streaming upload path with
// caller-supplied headers.
func (s *S) TestPutReaderHeader(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	buf := bytes.NewBufferString("content")
	err := b.PutReaderHeader(
		"name",
		buf,
		int64(buf.Len()),
		map[string][]string{"Content-Type": {"content-type"}},
		s3.Private,
	)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
	//c.Assert(req.Header["Content-MD5"], Equals, "...")
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}

// DelObject docs: http://goo.gl/APeTt

// TestDelObject checks that Del issues DELETE /bucket/name.
func (s *S) TestDelObject(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	err := b.Del("name")
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "DELETE")
	c.Assert(req.URL.Path, Equals, "/bucket/name")
	c.Assert(req.Header["Date"], Not(Equals), "")
}
+
+func (s *S) TestDelMultiObjects(c *C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ objects := []s3.Object{s3.Object{Key: "test"}}
+ err := b.DelMulti(s3.Delete{
+ Quiet: false,
+ Objects: objects,
+ })
+ c.Assert(err, IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, Equals, "POST")
+ c.Assert(req.URL.RawQuery, Equals, "delete=")
+ c.Assert(req.Header["Date"], Not(Equals), "")
+ c.Assert(req.Header["Content-MD5"], Not(Equals), "")
+ c.Assert(req.Header["Content-Type"], Not(Equals), "")
+ c.Assert(req.ContentLength, Not(Equals), "")
+}
+
// Bucket List Objects docs: http://goo.gl/YjQTc

// TestList checks parameter encoding and response decoding for a
// plain (non-delimited) listing, using the canned GetListResultDump1.
func (s *S) TestList(c *C) {
	testServer.Response(200, nil, GetListResultDump1)

	b := s.s3.Bucket("quotes")

	data, err := b.List("N", "", "", 0)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/quotes/")
	c.Assert(req.Header["Date"], Not(Equals), "")
	c.Assert(req.Form["prefix"], DeepEquals, []string{"N"})
	c.Assert(req.Form["delimiter"], DeepEquals, []string{""})
	c.Assert(req.Form["marker"], DeepEquals, []string{""})
	c.Assert(req.Form["max-keys"], DeepEquals, []string(nil))

	c.Assert(data.Name, Equals, "quotes")
	c.Assert(data.Prefix, Equals, "N")
	c.Assert(data.IsTruncated, Equals, false)
	c.Assert(len(data.Contents), Equals, 2)

	c.Assert(data.Contents[0].Key, Equals, "Nelson")
	c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z")
	c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
	c.Assert(data.Contents[0].Size, Equals, int64(5))
	c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD")
	c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f")
	c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile")

	c.Assert(data.Contents[1].Key, Equals, "Neo")
	c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z")
	c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
	c.Assert(data.Contents[1].Size, Equals, int64(4))
	c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD")
	c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f")
	c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile")
}

// TestListWithDelimiter checks a delimited listing with prefix,
// marker and max-keys, using the canned GetListResultDump2.
func (s *S) TestListWithDelimiter(c *C) {
	testServer.Response(200, nil, GetListResultDump2)

	b := s.s3.Bucket("quotes")

	data, err := b.List("photos/2006/", "/", "some-marker", 1000)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/quotes/")
	c.Assert(req.Header["Date"], Not(Equals), "")
	c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"})
	c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
	c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"})
	c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"})

	c.Assert(data.Name, Equals, "example-bucket")
	c.Assert(data.Prefix, Equals, "photos/2006/")
	c.Assert(data.Delimiter, Equals, "/")
	c.Assert(data.Marker, Equals, "some-marker")
	c.Assert(data.IsTruncated, Equals, false)
	c.Assert(len(data.Contents), Equals, 0)
	c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
}
+
// TestExists checks that a 200 on HEAD reports the object as present.
func (s *S) TestExists(c *C) {
	testServer.Response(200, nil, "")

	b := s.s3.Bucket("bucket")
	result, err := b.Exists("name")

	req := testServer.WaitRequest()

	c.Assert(req.Method, Equals, "HEAD")

	c.Assert(err, IsNil)
	c.Assert(result, Equals, true)
}

// TestExistsNotFound404 checks that a 404 is treated as "absent", not
// as an error.
func (s *S) TestExistsNotFound404(c *C) {
	testServer.Response(404, nil, "")

	b := s.s3.Bucket("bucket")
	result, err := b.Exists("name")

	req := testServer.WaitRequest()

	c.Assert(req.Method, Equals, "HEAD")

	c.Assert(err, IsNil)
	c.Assert(result, Equals, false)
}

// TestExistsNotFound403 checks that a 403 (no permission) is also
// treated as "absent" rather than as an error.
func (s *S) TestExistsNotFound403(c *C) {
	testServer.Response(403, nil, "")

	b := s.s3.Bucket("bucket")
	result, err := b.Exists("name")

	req := testServer.WaitRequest()

	c.Assert(req.Method, Equals, "HEAD")

	c.Assert(err, IsNil)
	c.Assert(result, Equals, false)
}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go
new file mode 100644
index 000000000..1b898efc4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go
@@ -0,0 +1,590 @@
+package s3_test
+
+import (
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/goamz/goamz/aws"
+ "github.com/goamz/goamz/s3"
+ "github.com/goamz/goamz/testutil"
+ . "gopkg.in/check.v1"
+)
+
+// AmazonServer represents an Amazon S3 server.
+type AmazonServer struct {
+	auth aws.Auth
+}
+
+// SetUp loads AWS credentials from the environment; the suite cannot
+// run against live S3 without them, so a missing pair is fatal.
+func (s *AmazonServer) SetUp(c *C) {
+	auth, err := aws.EnvAuth()
+	if err != nil {
+		c.Fatal(err.Error())
+	}
+	s.auth = auth
+}
+
+// Register the live suites: path-style addressing in two regions, and
+// domain-style addressing (see AmazonDomainClientSuite) in us-east-1.
+var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
+var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
+var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})
+
+// AmazonClientSuite tests the client against a live S3 server.
+type AmazonClientSuite struct {
+	aws.Region
+	srv AmazonServer
+	ClientTests
+}
+
+// SetUpSuite skips unless live testing was requested, then builds the
+// S3 client and removes any bucket left over from an earlier run.
+func (s *AmazonClientSuite) SetUpSuite(c *C) {
+	if !testutil.Amazon {
+		c.Skip("live tests against AWS disabled (no -amazon)")
+	}
+	s.srv.SetUp(c)
+	s.s3 = s3.New(s.srv.auth, s.Region)
+	// In case tests were interrupted in the middle before.
+	s.ClientTests.Cleanup()
+}
+
+// TearDownTest deletes the shared test bucket after every test.
+func (s *AmazonClientSuite) TearDownTest(c *C) {
+	s.ClientTests.Cleanup()
+}
+
+// AmazonDomainClientSuite tests the client against a live S3
+// server using bucket names in the endpoint domain name rather
+// than the request path.
+type AmazonDomainClientSuite struct {
+	aws.Region
+	srv AmazonServer
+	ClientTests
+}
+
+// SetUpSuite mirrors AmazonClientSuite.SetUpSuite but rewrites the
+// region to use ${bucket}-in-hostname addressing.
+func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
+	if !testutil.Amazon {
+		c.Skip("live tests against AWS disabled (no -amazon)")
+	}
+	s.srv.SetUp(c)
+	region := s.Region
+	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
+	s.s3 = s3.New(s.srv.auth, region)
+	s.ClientTests.Cleanup()
+}
+
+// TearDownTest deletes the shared test bucket after every test.
+func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
+	s.ClientTests.Cleanup()
+}
+
+// ClientTests defines integration tests designed to test the client.
+// It is not used as a test suite in itself, but embedded within
+// another type.
+type ClientTests struct {
+	s3 *s3.S3
+	// authIsBroken is set when the backing server ignores request
+	// authentication, so signature-expiry tests must be skipped.
+	authIsBroken bool
+}
+
+// Cleanup removes the per-credentials test bucket and everything in it.
+func (s *ClientTests) Cleanup() {
+	killBucket(testBucket(s.s3))
+}
+
+// testBucket derives a bucket name from the region and a prefix of the
+// access key so concurrent runs with different credentials don't clash.
+func testBucket(s *s3.S3) *s3.Bucket {
+	// Watch out! If this function is corrupted and made to match with something
+	// people own, killBucket will happily remove *everything* inside the bucket.
+	key := s.Auth.AccessKey
+	if len(key) >= 8 {
+		key = s.Auth.AccessKey[:8]
+	}
+	return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
+}
+
+// attempts is the retry policy used when talking to S3, which is only
+// eventually consistent: at least 5 tries, 100ms apart, within 20s.
+var attempts = aws.AttemptStrategy{
+	Min:   5,
+	Total: 20 * time.Second,
+	Delay: 100 * time.Millisecond,
+}
+
+// killBucket deletes b, retrying while it is non-empty by first
+// deleting its keys and aborting in-progress multipart uploads.
+// A missing bucket or DNS failure counts as success; any other
+// persistent failure panics so the test run aborts loudly.
+func killBucket(b *s3.Bucket) {
+	var err error
+	for attempt := attempts.Start(); attempt.Next(); {
+		err = b.DelBucket()
+		if err == nil {
+			return
+		}
+		if _, ok := err.(*net.DNSError); ok {
+			return
+		}
+		e, ok := err.(*s3.Error)
+		if ok && e.Code == "NoSuchBucket" {
+			return
+		}
+		if ok && e.Code == "BucketNotEmpty" {
+			// Errors are ignored here. Just retry.
+			resp, err := b.List("", "", "", 1000)
+			if err == nil {
+				for _, key := range resp.Contents {
+					_ = b.Del(key.Key)
+				}
+			}
+			multis, _, _ := b.ListMulti("", "")
+			for _, m := range multis {
+				_ = m.Abort()
+			}
+		}
+	}
+	message := "cannot delete test bucket"
+	if err != nil {
+		message += ": " + err.Error()
+	}
+	panic(message)
+}
+
+// get fetches url with retries (both on the request and on reading the
+// body) and returns the full response body. Panics if the retry loop
+// ever exits without returning, which cannot happen: the last attempt
+// always returns either data or an error.
+func get(url string) ([]byte, error) {
+	for attempt := attempts.Start(); attempt.Next(); {
+		resp, err := http.Get(url)
+		if err != nil {
+			if attempt.HasNext() {
+				continue
+			}
+			return nil, err
+		}
+		data, err := ioutil.ReadAll(resp.Body)
+		resp.Body.Close()
+		if err != nil {
+			if attempt.HasNext() {
+				continue
+			}
+			return nil, err
+		}
+		return data, err
+	}
+	panic("unreachable")
+}
+
+// TestBasicFunctionality exercises the whole happy path: bucket
+// creation, Put/Get, PutReader/GetReader, anonymous and signed URLs,
+// deletion of objects, and bucket deletion (including the
+// BucketNotEmpty error while objects remain).
+func (s *ClientTests) TestBasicFunctionality(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.PublicRead)
+	c.Assert(err, IsNil)
+
+	err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead, s3.Options{})
+	c.Assert(err, IsNil)
+	defer b.Del("name")
+
+	data, err := b.Get("name")
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, "yo!")
+
+	// The object is PublicRead, so a plain unsigned GET must work too.
+	data, err = get(b.URL("name"))
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, "yo!")
+
+	buf := bytes.NewBufferString("hey!")
+	err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private, s3.Options{})
+	c.Assert(err, IsNil)
+	defer b.Del("name2")
+
+	rc, err := b.GetReader("name2")
+	c.Assert(err, IsNil)
+	data, err = ioutil.ReadAll(rc)
+	c.Check(err, IsNil)
+	c.Check(string(data), Equals, "hey!")
+	rc.Close()
+
+	// A private object is reachable through a signed URL...
+	data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, "hey!")
+
+	if !s.authIsBroken {
+		// ...but an expired signature must be refused. Skipped against
+		// the fake server, which does not check authentication.
+		data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Matches, "(?s).*AccessDenied.*")
+	}
+
+	err = b.DelBucket()
+	c.Assert(err, NotNil)
+
+	s3err, ok := err.(*s3.Error)
+	c.Assert(ok, Equals, true)
+	c.Assert(s3err.Code, Equals, "BucketNotEmpty")
+	c.Assert(s3err.BucketName, Equals, b.Name)
+	c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty")
+
+	err = b.Del("name")
+	c.Assert(err, IsNil)
+	err = b.Del("name2")
+	c.Assert(err, IsNil)
+
+	err = b.DelBucket()
+	c.Assert(err, IsNil)
+}
+
+// TestGetNotFound checks the error shape for a GET against a bucket
+// that was never created: a 404 NoSuchBucket *s3.Error and nil data.
+func (s *ClientTests) TestGetNotFound(c *C) {
+	b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
+	data, err := b.Get("non-existent")
+
+	s3err, _ := err.(*s3.Error)
+	c.Assert(s3err, NotNil)
+	c.Assert(s3err.StatusCode, Equals, 404)
+	c.Assert(s3err.Code, Equals, "NoSuchBucket")
+	c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
+	c.Assert(data, IsNil)
+}
+
+// Communicate with all endpoints to see if they are alive.
+// Each region is probed concurrently with a GET that is expected to
+// fail with NoSuchBucket (proving the endpoint answered) or, at worst,
+// a DNS error for an unreachable endpoint.
+func (s *ClientTests) TestRegions(c *C) {
+	errs := make(chan error, len(aws.Regions))
+	for _, region := range aws.Regions {
+		go func(r aws.Region) {
+			// Use a distinct name for the per-region client so it
+			// does not shadow the suite receiver s.
+			cli := s3.New(s.s3.Auth, r)
+			b := cli.Bucket("goamz-" + cli.Auth.AccessKey)
+			_, err := b.Get("non-existent")
+			errs <- err
+		}(region)
+	}
+	for range aws.Regions {
+		err := <-errs
+		if err != nil {
+			s3_err, ok := err.(*s3.Error)
+			if ok {
+				c.Check(s3_err.Code, Matches, "NoSuchBucket")
+			} else if _, ok = err.(*net.DNSError); ok {
+				// Okay as well.
+			} else {
+				c.Errorf("Non-S3 error: %s", err)
+			}
+		} else {
+			c.Errorf("Test should have errored but it seems to have succeeded")
+		}
+	}
+}
+
+// objectNames are the fixture keys used by the listing tests, kept in
+// the lexical order S3 returns them in.
+var objectNames = []string{
+	"index.html",
+	"index2.html",
+	"photos/2006/February/sample2.jpg",
+	"photos/2006/February/sample3.jpg",
+	"photos/2006/February/sample4.jpg",
+	"photos/2006/January/sample.jpg",
+	"test/bar",
+	"test/foo",
+}
+
+// keys builds a []s3.Key with only the Key field populated, for use as
+// expected listing contents.
+func keys(names ...string) []s3.Key {
+	ks := make([]s3.Key, len(names))
+	for i, name := range names {
+		ks[i].Key = name
+	}
+	return ks
+}
+
+// As the ListResp specifies all the parameters to the
+// request too, we use it to specify request parameters
+// and expected results. The Contents field is
+// used only for the key names inside it.
+var listTests = []s3.ListResp{
+	// normal list.
+	{
+		Contents: keys(objectNames...),
+	}, {
+		Marker:   objectNames[0],
+		Contents: keys(objectNames[1:]...),
+	}, {
+		// A marker between two keys skips everything up to it.
+		Marker:   objectNames[0] + "a",
+		Contents: keys(objectNames[1:]...),
+	}, {
+		// A marker past every key yields an empty listing.
+		Marker: "z",
+	},
+
+	// limited results.
+	{
+		MaxKeys:     2,
+		Contents:    keys(objectNames[0:2]...),
+		IsTruncated: true,
+	}, {
+		MaxKeys:     2,
+		Marker:      objectNames[0],
+		Contents:    keys(objectNames[1:3]...),
+		IsTruncated: true,
+	}, {
+		MaxKeys:  2,
+		Marker:   objectNames[len(objectNames)-2],
+		Contents: keys(objectNames[len(objectNames)-1:]...),
+	},
+
+	// with delimiter
+	{
+		Delimiter:      "/",
+		CommonPrefixes: []string{"photos/", "test/"},
+		Contents:       keys("index.html", "index2.html"),
+	}, {
+		Delimiter:      "/",
+		Prefix:         "photos/2006/",
+		CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
+	}, {
+		Delimiter:      "/",
+		Prefix:         "t",
+		CommonPrefixes: []string{"test/"},
+	}, {
+		Delimiter:   "/",
+		MaxKeys:     1,
+		Contents:    keys("index.html"),
+		IsTruncated: true,
+	}, {
+		Delimiter:      "/",
+		MaxKeys:        1,
+		Marker:         "index2.html",
+		CommonPrefixes: []string{"photos/"},
+		IsTruncated:    true,
+	}, {
+		Delimiter:      "/",
+		MaxKeys:        1,
+		Marker:         "photos/",
+		CommonPrefixes: []string{"test/"},
+		IsTruncated:    false,
+	}, {
+		// The delimiter need not be a single character.
+		Delimiter:      "Feb",
+		CommonPrefixes: []string{"photos/2006/Feb"},
+		Contents:       keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
+	},
+}
+
+// TestDoublePutBucket re-creates an existing bucket. Depending on the
+// region this either succeeds silently (us-east-1) or fails with
+// BucketAlreadyOwnedByYou; both outcomes are accepted.
+func (s *ClientTests) TestDoublePutBucket(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.PublicRead)
+	c.Assert(err, IsNil)
+
+	err = b.PutBucket(s3.PublicRead)
+	if err != nil {
+		c.Assert(err, FitsTypeOf, new(s3.Error))
+		c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou")
+	}
+}
+
+// TestBucketList uploads the objectNames fixtures (object i holds i
+// bytes of "a", so sizes and ETags are distinguishable) and then runs
+// every case in listTests against the real listing endpoint.
+func (s *ClientTests) TestBucketList(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.Private)
+	c.Assert(err, IsNil)
+
+	objData := make(map[string][]byte)
+	for i, path := range objectNames {
+		data := []byte(strings.Repeat("a", i))
+		err := b.Put(path, data, "text/plain", s3.Private, s3.Options{})
+		c.Assert(err, IsNil)
+		defer b.Del(path)
+		objData[path] = data
+	}
+
+	for i, t := range listTests {
+		c.Logf("test %d", i)
+		resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
+		c.Assert(err, IsNil)
+		c.Check(resp.Name, Equals, b.Name)
+		c.Check(resp.Delimiter, Equals, t.Delimiter)
+		c.Check(resp.IsTruncated, Equals, t.IsTruncated)
+		c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes)
+		checkContents(c, resp.Contents, objData, t.Contents)
+	}
+}
+
+// etag returns the S3-style ETag for data: the hex MD5 digest wrapped
+// in double quotes.
+func etag(data []byte) string {
+	sum := md5.New()
+	sum.Write(data)
+	return fmt.Sprintf(`"%x"`, sum.Sum(nil))
+}
+
+// checkContents verifies that a listing's contents match the expected
+// keys in order, and that each entry's size and ETag agree with the
+// uploaded data.
+func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
+	c.Assert(contents, HasLen, len(expected))
+	for i, k := range contents {
+		c.Check(k.Key, Equals, expected[i].Key)
+		// TODO mtime
+		c.Check(k.Size, Equals, int64(len(data[k.Key])))
+		c.Check(k.ETag, Equals, etag(data[k.Key]))
+	}
+}
+
+// TestMultiInitPutList initiates a multipart upload, sends five small
+// parts, and verifies ListParts pagination. Completing with sub-5MB
+// parts must fail with EntityTooSmall; after Abort, ListParts must
+// fail with NoSuchUpload.
+func (s *ClientTests) TestMultiInitPutList(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.Private)
+	c.Assert(err, IsNil)
+
+	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+	c.Assert(err, IsNil)
+	c.Assert(multi.UploadId, Matches, ".+")
+	defer multi.Abort()
+
+	var sent []s3.Part
+
+	for i := 0; i < 5; i++ {
+		p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
+		c.Assert(err, IsNil)
+		c.Assert(p.N, Equals, i+1)
+		c.Assert(p.Size, Equals, int64(8))
+		c.Assert(p.ETag, Matches, ".+")
+		sent = append(sent, p)
+	}
+
+	// Force ListParts to paginate (2 per request) to exercise the
+	// multi-request path.
+	s3.SetListPartsMax(2)
+
+	parts, err := multi.ListParts()
+	c.Assert(err, IsNil)
+	c.Assert(parts, HasLen, len(sent))
+	for i := range parts {
+		c.Assert(parts[i].N, Equals, sent[i].N)
+		c.Assert(parts[i].Size, Equals, sent[i].Size)
+		c.Assert(parts[i].ETag, Equals, sent[i].ETag)
+	}
+
+	// All parts are below the 5MB minimum, so Complete must fail.
+	err = multi.Complete(parts)
+	s3err, failed := err.(*s3.Error)
+	c.Assert(failed, Equals, true)
+	c.Assert(s3err.Code, Equals, "EntityTooSmall")
+
+	err = multi.Abort()
+	c.Assert(err, IsNil)
+	_, err = multi.ListParts()
+	s3err, ok := err.(*s3.Error)
+	c.Assert(ok, Equals, true)
+	c.Assert(s3err.Code, Equals, "NoSuchUpload")
+}
+
+// This may take a minute or more due to the minimum size accepted S3
+// on multipart upload parts.
+// TestMultiComplete uploads two parts (given to Complete out of order,
+// which it must fix) and verifies the assembled object byte-for-byte.
+func (s *ClientTests) TestMultiComplete(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.Private)
+	c.Assert(err, IsNil)
+
+	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+	c.Assert(err, IsNil)
+	c.Assert(multi.UploadId, Matches, ".+")
+	defer multi.Abort()
+
+	// Minimum size S3 accepts for all but the last part is 5MB.
+	data1 := make([]byte, 5*1024*1024)
+	data2 := []byte("<part 2>")
+
+	part1, err := multi.PutPart(1, bytes.NewReader(data1))
+	c.Assert(err, IsNil)
+	part2, err := multi.PutPart(2, bytes.NewReader(data2))
+	c.Assert(err, IsNil)
+
+	// Purposefully reversed. The order requirement must be handled.
+	err = multi.Complete([]s3.Part{part2, part1})
+	c.Assert(err, IsNil)
+
+	data, err := b.Get("multi")
+	c.Assert(err, IsNil)
+
+	c.Assert(len(data), Equals, len(data1)+len(data2))
+	for i := range data1 {
+		if data[i] != data1[i] {
+			// Fixed: the original Fatalf had three verbs but only two
+			// arguments, and omitted the offending index.
+			c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i])
+		}
+	}
+	c.Assert(string(data[len(data1):]), Equals, string(data2))
+}
+
+// multiList implements sort.Interface over *s3.Multi by key, so test
+// results can be compared in a deterministic order.
+type multiList []*s3.Multi
+
+func (l multiList) Len() int           { return len(l) }
+func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
+func (l multiList) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
+
+// TestListMulti verifies ListMulti with and without a delimiter and
+// with a key prefix, retrying while S3's eventually consistent listing
+// catches up with the uploads just initiated.
+func (s *ClientTests) TestListMulti(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.Private)
+	c.Assert(err, IsNil)
+
+	// Ensure an empty state before testing its behavior.
+	multis, _, err := b.ListMulti("", "")
+	// Fixed: the listing error was previously ignored here.
+	c.Assert(err, IsNil)
+	for _, m := range multis {
+		err := m.Abort()
+		c.Assert(err, IsNil)
+	}
+
+	keys := []string{
+		"a/multi2",
+		"a/multi3",
+		"b/multi4",
+		"multi1",
+	}
+	for _, key := range keys {
+		m, err := b.InitMulti(key, "", s3.Private)
+		c.Assert(err, IsNil)
+		defer m.Abort()
+	}
+
+	// Amazon's implementation of the multiple-request listing for
+	// multipart uploads in progress seems broken in multiple ways.
+	// (next tokens are not provided, etc).
+	//s3.SetListMultiMax(2)
+
+	multis, prefixes, err := b.ListMulti("", "")
+	c.Assert(err, IsNil)
+	for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
+		multis, prefixes, err = b.ListMulti("", "")
+		c.Assert(err, IsNil)
+	}
+	sort.Sort(multiList(multis))
+	c.Assert(prefixes, IsNil)
+	var gotKeys []string
+	for _, m := range multis {
+		gotKeys = append(gotKeys, m.Key)
+	}
+	c.Assert(gotKeys, DeepEquals, keys)
+	for _, m := range multis {
+		c.Assert(m.Bucket, Equals, b)
+		c.Assert(m.UploadId, Matches, ".+")
+	}
+
+	multis, prefixes, err = b.ListMulti("", "/")
+	for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
+		// Fixed: the retry previously listed with no delimiter, so a
+		// retry would clobber the delimiter results asserted below.
+		multis, prefixes, err = b.ListMulti("", "/")
+		c.Assert(err, IsNil)
+	}
+	c.Assert(err, IsNil)
+	c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
+	c.Assert(multis, HasLen, 1)
+	c.Assert(multis[0].Bucket, Equals, b)
+	c.Assert(multis[0].Key, Equals, "multi1")
+	c.Assert(multis[0].UploadId, Matches, ".+")
+
+	multis, prefixes, err = b.ListMulti("a/", "/")
+	c.Assert(err, IsNil)
+	for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
+		// Fixed: retry with the same prefix/delimiter being asserted on.
+		multis, prefixes, err = b.ListMulti("a/", "/")
+		c.Assert(err, IsNil)
+	}
+	c.Assert(prefixes, IsNil)
+	c.Assert(multis, HasLen, 2)
+	c.Assert(multis[0].Bucket, Equals, b)
+	c.Assert(multis[0].Key, Equals, "a/multi2")
+	c.Assert(multis[0].UploadId, Matches, ".+")
+	c.Assert(multis[1].Bucket, Equals, b)
+	c.Assert(multis[1].Key, Equals, "a/multi3")
+	c.Assert(multis[1].UploadId, Matches, ".+")
+}
+
+// TestMultiPutAllZeroLength checks that PutAll on an empty reader still
+// produces exactly one (empty) part, which S3 requires, and that the
+// upload can be completed.
+func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
+	b := testBucket(s.s3)
+	err := b.PutBucket(s3.Private)
+	c.Assert(err, IsNil)
+
+	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
+	c.Assert(err, IsNil)
+	defer multi.Abort()
+
+	// This tests an edge case. Amazon requires at least one
+	// part for multipart uploads to work, even if the part is empty.
+	parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
+	c.Assert(err, IsNil)
+	c.Assert(parts, HasLen, 1)
+	c.Assert(parts[0].Size, Equals, int64(0))
+	// MD5 of the empty string.
+	c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
+
+	err = multi.Complete(parts)
+	c.Assert(err, IsNil)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go
new file mode 100644
index 000000000..e98c50b29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go
@@ -0,0 +1,79 @@
+package s3_test
+
+import (
+ "github.com/goamz/goamz/aws"
+ "github.com/goamz/goamz/s3"
+ "github.com/goamz/goamz/s3/s3test"
+ . "gopkg.in/check.v1"
+)
+
+// LocalServer wraps an in-process s3test fake server together with the
+// (empty) credentials and synthetic region used to reach it.
+type LocalServer struct {
+	auth   aws.Auth
+	region aws.Region
+	srv    *s3test.Server
+	config *s3test.Config
+}
+
+// SetUp starts the fake server and builds a region pointing at it.
+func (s *LocalServer) SetUp(c *C) {
+	srv, err := s3test.NewServer(s.config)
+	c.Assert(err, IsNil)
+	c.Assert(srv, NotNil)
+
+	s.srv = srv
+	s.region = aws.Region{
+		Name:                 "faux-region-1",
+		S3Endpoint:           srv.URL(),
+		S3LocationConstraint: true, // s3test server requires a LocationConstraint
+	}
+}
+
+// LocalServerSuite defines tests that will run
+// against the local s3test server. It includes
+// selected tests from ClientTests;
+// when the s3test functionality is sufficient, it should
+// include all of them, and ClientTests can be simply embedded.
+type LocalServerSuite struct {
+	srv         LocalServer
+	clientTests ClientTests
+}
+
+var (
+	// run tests twice, once in us-east-1 mode, once not.
+	_ = Suite(&LocalServerSuite{})
+	_ = Suite(&LocalServerSuite{
+		srv: LocalServer{
+			config: &s3test.Config{
+				Send409Conflict: true,
+			},
+		},
+	})
+)
+
+// SetUpSuite boots the fake server and wires ClientTests to it.
+func (s *LocalServerSuite) SetUpSuite(c *C) {
+	s.srv.SetUp(c)
+	s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
+
+	// TODO Sadly the fake server ignores auth completely right now. :-(
+	s.clientTests.authIsBroken = true
+	s.clientTests.Cleanup()
+}
+
+// TearDownTest removes the shared test bucket after every test.
+func (s *LocalServerSuite) TearDownTest(c *C) {
+	s.clientTests.Cleanup()
+}
+
+// The following methods delegate to the embedded ClientTests, running
+// the selected integration tests against the fake server.
+
+func (s *LocalServerSuite) TestBasicFunctionality(c *C) {
+	s.clientTests.TestBasicFunctionality(c)
+}
+
+func (s *LocalServerSuite) TestGetNotFound(c *C) {
+	s.clientTests.TestGetNotFound(c)
+}
+
+func (s *LocalServerSuite) TestBucketList(c *C) {
+	s.clientTests.TestBucketList(c)
+}
+
+func (s *LocalServerSuite) TestDoublePutBucket(c *C) {
+	s.clientTests.TestDoublePutBucket(c)
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go
new file mode 100644
index 000000000..10d36924f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go
@@ -0,0 +1,629 @@
+package s3test
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "github.com/goamz/goamz/s3"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const debug = false
+
+// s3Error is the XML error document the fake server returns; it is
+// raised via panic (see fatalf) and rendered by serveHTTP's recover.
+type s3Error struct {
+	statusCode int
+	XMLName    struct{} `xml:"Error"`
+	Code       string
+	Message    string
+	BucketName string
+	RequestId  string
+	HostId     string
+}
+
+// action bundles everything a resource handler needs for one request.
+type action struct {
+	srv   *Server
+	w     http.ResponseWriter
+	req   *http.Request
+	reqId string
+}
+
+// Config controls the internal behaviour of the Server. A nil config is the default
+// and behaves as if all configurations assume their default behaviour. Once passed
+// to NewServer, the configuration must not be modified.
+type Config struct {
+	// Send409Conflict controls how the Server will respond to calls to PUT on a
+	// previously existing bucket. The default is false, and corresponds to the
+	// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
+	// all other regions.
+	// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
+	Send409Conflict bool
+}
+
+// send409Conflict is a nil-safe accessor for Config.Send409Conflict.
+func (c *Config) send409Conflict() bool {
+	if c != nil {
+		return c.Send409Conflict
+	}
+	return false
+}
+
+// Server is a fake S3 server for testing purposes.
+// All of the data for the server is kept in memory.
+type Server struct {
+	url      string
+	reqId    int
+	listener net.Listener
+	mu       sync.Mutex // guards reqId and buckets (held for whole requests)
+	buckets  map[string]*bucket
+	config   *Config
+}
+
+// bucket is the in-memory representation of one S3 bucket.
+type bucket struct {
+	name    string
+	acl     s3.ACL
+	ctime   time.Time
+	objects map[string]*object
+}
+
+// object is the in-memory representation of one stored object.
+type object struct {
+	name     string
+	mtime    time.Time
+	meta     http.Header // metadata to return with requests.
+	checksum []byte      // also held as Content-MD5 in meta.
+	data     []byte
+}
+
+// A resource encapsulates the subject of an HTTP request.
+// The resource referred to may or may not exist
+// when the request is made.
+// Handlers return the value to XML-encode in the response body, or nil.
+type resource interface {
+	put(a *action) interface{}
+	get(a *action) interface{}
+	post(a *action) interface{}
+	delete(a *action) interface{}
+}
+
+// NewServer starts a fake S3 server on an ephemeral localhost port and
+// begins serving in a background goroutine. Stop it with Quit.
+func NewServer(config *Config) (*Server, error) {
+	l, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		return nil, fmt.Errorf("cannot listen on localhost: %v", err)
+	}
+	srv := &Server{
+		listener: l,
+		url:      "http://" + l.Addr().String(),
+		buckets:  make(map[string]*bucket),
+		config:   config,
+	}
+	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		srv.serveHTTP(w, req)
+	}))
+	return srv, nil
+}
+
+// Quit closes down the server.
+func (srv *Server) Quit() {
+	srv.listener.Close()
+}
+
+// URL returns a URL for the server.
+func (srv *Server) URL() string {
+	return srv.url
+}
+
+// fatalf aborts the current request by panicking with an *s3Error;
+// serveHTTP recovers it and renders the XML error response.
+func fatalf(code int, codeStr string, errf string, a ...interface{}) {
+	panic(&s3Error{
+		statusCode: code,
+		Code:       codeStr,
+		Message:    fmt.Sprintf(errf, a...),
+	})
+}
+
+// serveHTTP serves the S3 protocol. The whole request runs under
+// srv.mu, so the in-memory state never needs finer locking. Handlers
+// abort via fatalf, whose *s3Error panic is recovered here and
+// rendered as an S3 XML error document.
+func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
+	// ignore error from ParseForm as it's usually spurious.
+	req.ParseForm()
+
+	srv.mu.Lock()
+	defer srv.mu.Unlock()
+
+	if debug {
+		log.Printf("s3test %q %q", req.Method, req.URL)
+	}
+	a := &action{
+		srv:   srv,
+		w:     w,
+		req:   req,
+		reqId: fmt.Sprintf("%09X", srv.reqId),
+	}
+	srv.reqId++
+
+	var r resource
+	defer func() {
+		switch err := recover().(type) {
+		case *s3Error:
+			switch r := r.(type) {
+			case objectResource:
+				err.BucketName = r.bucket.name
+			case bucketResource:
+				err.BucketName = r.name
+			}
+			err.RequestId = a.reqId
+			// TODO HostId
+			// Fixed: the header previously held the bogus value
+			// `xml version="1.0" encoding="UTF-8"`, which is not a MIME type.
+			w.Header().Set("Content-Type", "application/xml")
+			w.WriteHeader(err.statusCode)
+			xmlMarshal(w, err)
+		case nil:
+		default:
+			// Not an s3Error: a genuine bug; re-panic.
+			panic(err)
+		}
+	}()
+
+	r = srv.resourceForURL(req.URL)
+
+	var resp interface{}
+	switch req.Method {
+	case "PUT":
+		resp = r.put(a)
+	case "GET", "HEAD":
+		resp = r.get(a)
+	case "DELETE":
+		resp = r.delete(a)
+	case "POST":
+		resp = r.post(a)
+	default:
+		fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
+	}
+	if resp != nil && req.Method != "HEAD" {
+		xmlMarshal(w, resp)
+	}
+}
+
+// xmlMarshal is the same as xml.Marshal except that
+// it panics on error. The marshalling should not fail,
+// but we want to know if it does.
+func xmlMarshal(w io.Writer, x interface{}) {
+	if err := xml.NewEncoder(w).Encode(x); err != nil {
+		panic(fmt.Errorf("error marshalling %#v: %v", x, err))
+	}
+}
+
+// In a fully implemented test server, each of these would have
+// its own resource type.
+// Requests carrying one of these query parameters fall through to
+// nullResource, which rejects every method.
+var unimplementedBucketResourceNames = map[string]bool{
+	"acl":            true,
+	"lifecycle":      true,
+	"policy":         true,
+	"location":       true,
+	"logging":        true,
+	"notification":   true,
+	"versions":       true,
+	"requestPayment": true,
+	"versioning":     true,
+	"website":        true,
+	"uploads":        true,
+}
+
+var unimplementedObjectResourceNames = map[string]bool{
+	"uploadId": true,
+	"acl":      true,
+	"torrent":  true,
+	"uploads":  true,
+}
+
+// pathRegexp splits "/<bucket>/<object...>"; group 2 is the bucket
+// name and group 4 the object key (either may be empty).
+var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
+
+// resourceForURL returns a resource object for the given URL:
+// nullResource for the root or unimplemented sub-resources,
+// bucketResource for "/<bucket>", objectResource for "/<bucket>/<key>".
+// Referencing an object in a bucket that does not exist aborts with
+// 404 NoSuchBucket.
+func (srv *Server) resourceForURL(u *url.URL) (r resource) {
+	m := pathRegexp.FindStringSubmatch(u.Path)
+	if m == nil {
+		fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
+	}
+	bucketName := m[2]
+	objectName := m[4]
+	if bucketName == "" {
+		return nullResource{} // root
+	}
+	b := bucketResource{
+		name:   bucketName,
+		bucket: srv.buckets[bucketName],
+	}
+	q := u.Query()
+	if objectName == "" {
+		for name := range q {
+			if unimplementedBucketResourceNames[name] {
+				return nullResource{}
+			}
+		}
+		return b
+
+	}
+	if b.bucket == nil {
+		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+	}
+	objr := objectResource{
+		name:    objectName,
+		version: q.Get("versionId"),
+		bucket:  b.bucket,
+	}
+	for name := range q {
+		if unimplementedObjectResourceNames[name] {
+			return nullResource{}
+		}
+	}
+	if obj := objr.bucket.objects[objr.name]; obj != nil {
+		objr.object = obj
+	}
+	return objr
+}
+
+// nullResource has error stubs for all resource methods.
+type nullResource struct{}
+
+// notAllowed aborts with MethodNotAllowed. (NOTE(review): sent with
+// status 400, not 405 — presumably deliberate mimicry; confirm against
+// the real S3 behaviour before changing.)
+func notAllowed() interface{} {
+	fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
+	return nil
+}
+
+func (nullResource) put(a *action) interface{}    { return notAllowed() }
+func (nullResource) get(a *action) interface{}    { return notAllowed() }
+func (nullResource) post(a *action) interface{}   { return notAllowed() }
+func (nullResource) delete(a *action) interface{} { return notAllowed() }
+
+// timeFormat is the ISO-8601 layout S3 uses for LastModified values.
+const timeFormat = "2006-01-02T15:04:05.000Z07:00"
+
+// bucketResource addresses a bucket by name, whether or not it exists.
+type bucketResource struct {
+	name   string
+	bucket *bucket // non-nil if the bucket already exists.
+}
+
+// GET on a bucket lists the objects in the bucket.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
+// Supports prefix, delimiter, marker and max-keys (default 1000).
+// HEAD returns only headers.
+func (r bucketResource) get(a *action) interface{} {
+	if r.bucket == nil {
+		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+	}
+	delimiter := a.req.Form.Get("delimiter")
+	marker := a.req.Form.Get("marker")
+	maxKeys := -1
+	if s := a.req.Form.Get("max-keys"); s != "" {
+		i, err := strconv.Atoi(s)
+		if err != nil || i < 0 {
+			// Fixed: the message was previously passed as the error
+			// *code* and the raw value as the format string.
+			fatalf(400, "InvalidArgument", "invalid value for max-keys: %q", s)
+		}
+		maxKeys = i
+	}
+	prefix := a.req.Form.Get("prefix")
+	a.w.Header().Set("Content-Type", "application/xml")
+
+	if a.req.Method == "HEAD" {
+		return nil
+	}
+
+	var objs orderedObjects
+
+	// first get all matching objects and arrange them in alphabetical order.
+	for name, obj := range r.bucket.objects {
+		if strings.HasPrefix(name, prefix) {
+			objs = append(objs, obj)
+		}
+	}
+	sort.Sort(objs)
+
+	if maxKeys <= 0 {
+		maxKeys = 1000
+	}
+	resp := &s3.ListResp{
+		Name:      r.bucket.name,
+		Prefix:    prefix,
+		Delimiter: delimiter,
+		Marker:    marker,
+		MaxKeys:   maxKeys,
+	}
+
+	var prefixes []string
+	for _, obj := range objs {
+		if !strings.HasPrefix(obj.name, prefix) {
+			continue
+		}
+		name := obj.name
+		isPrefix := false
+		if delimiter != "" {
+			// Collapse keys sharing a delimiter-bounded prefix into a
+			// single CommonPrefixes entry.
+			if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
+				name = obj.name[:len(prefix)+i+len(delimiter)]
+				if prefixes != nil && prefixes[len(prefixes)-1] == name {
+					continue
+				}
+				isPrefix = true
+			}
+		}
+		if name <= marker {
+			continue
+		}
+		if len(resp.Contents)+len(prefixes) >= maxKeys {
+			resp.IsTruncated = true
+			break
+		}
+		if isPrefix {
+			prefixes = append(prefixes, name)
+		} else {
+			// Contents contains only keys not found in CommonPrefixes
+			resp.Contents = append(resp.Contents, obj.s3Key())
+		}
+	}
+	resp.CommonPrefixes = prefixes
+	return resp
+}
+
+// orderedObjects holds a slice of objects that can be sorted
+// by name.
+type orderedObjects []*object
+
+func (s orderedObjects) Len() int {
+	return len(s)
+}
+func (s orderedObjects) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s orderedObjects) Less(i, j int) bool {
+	return s[i].name < s[j].name
+}
+
+// s3Key converts the stored object into the listing entry returned by
+// bucket GET; the ETag is the quoted hex MD5 of the data.
+func (obj *object) s3Key() s3.Key {
+	return s3.Key{
+		Key:          obj.name,
+		LastModified: obj.mtime.Format(timeFormat),
+		Size:         int64(len(obj.data)),
+		ETag:         fmt.Sprintf(`"%x"`, obj.checksum),
+		// TODO StorageClass
+		// TODO Owner
+	}
+}
+
+// DELETE on a bucket deletes the bucket if it's not empty.
+// Mirrors S3: 404 NoSuchBucket if absent, 400 BucketNotEmpty if it
+// still holds objects.
+func (r bucketResource) delete(a *action) interface{} {
+	b := r.bucket
+	if b == nil {
+		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+	}
+	if len(b.objects) > 0 {
+		fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
+	}
+	delete(a.srv.buckets, b.name)
+	return nil
+}
+
+// PUT on a bucket creates the bucket.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
+// Re-PUT of an existing bucket returns 409 only when the server is
+// configured with Send409Conflict (non-us-east-1 behaviour); the ACL
+// is (re)applied from the x-amz-acl header either way.
+func (r bucketResource) put(a *action) interface{} {
+	var created bool
+	if r.bucket == nil {
+		if !validBucketName(r.name) {
+			fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
+		}
+		if loc := locationConstraint(a); loc == "" {
+			// Fixed: error code was misspelled "InvalidRequets".
+			fatalf(400, "InvalidRequest", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
+		}
+		// TODO validate acl
+		r.bucket = &bucket{
+			name: r.name,
+			// TODO default acl
+			objects: make(map[string]*object),
+		}
+		a.srv.buckets[r.name] = r.bucket
+		created = true
+	}
+	if !created && a.srv.config.send409Conflict() {
+		fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
+	}
+	r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
+	return nil
+}
+
+// POST on a bucket is not supported by this fake server.
+func (bucketResource) post(a *action) interface{} {
+	fatalf(400, "Method", "bucket POST method not available")
+	return nil
+}
+
+// validBucketName returns whether name is a valid bucket name.
+// Here are the rules, from:
+// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
+//
+// Can contain lowercase letters, numbers, periods (.), underscores (_),
+// and dashes (-). You can use uppercase letters for buckets only in the
+// US Standard region.
+//
+// Must start with a number or letter
+//
+// Must be between 3 and 255 characters long
+//
+// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4)
+// but the real S3 server does not seem to check that rule, so we will not
+// check it either.
+//
+func validBucketName(name string) bool {
+	if len(name) < 3 || len(name) > 255 {
+		return false
+	}
+	// First byte must be a lowercase letter or digit (ASCII-only check).
+	r := name[0]
+	if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
+		return false
+	}
+	for _, r := range name {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'a' && r <= 'z':
+		case r == '_' || r == '-':
+		case r == '.':
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+// responseParams are the "response-*" query parameters a GET may use
+// to override the corresponding response headers.
+var responseParams = map[string]bool{
+	"content-type":        true,
+	"content-language":    true,
+	"expires":             true,
+	"cache-control":       true,
+	"content-disposition": true,
+	"content-encoding":    true,
+}
+
+// objectResource addresses an object within an existing bucket.
+type objectResource struct {
+	name    string
+	version string
+	bucket  *bucket // always non-nil.
+	object  *object // may be nil.
+}
+
+// GET on an object gets the contents of the object.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
+// Stored metadata headers are echoed back, "response-*" query
+// parameters may override selected headers, and HEAD omits the body.
+func (objr objectResource) get(a *action) interface{} {
+	obj := objr.object
+	if obj == nil {
+		fatalf(404, "NoSuchKey", "The specified key does not exist.")
+	}
+	h := a.w.Header()
+	// add metadata
+	for name, d := range obj.meta {
+		h[name] = d
+	}
+	// override header values in response to request parameters.
+	for name, vals := range a.req.Form {
+		if strings.HasPrefix(name, "response-") {
+			name = name[len("response-"):]
+			if !responseParams[name] {
+				continue
+			}
+			h.Set(name, vals[0])
+		}
+	}
+	if r := a.req.Header.Get("Range"); r != "" {
+		fatalf(400, "NotImplemented", "range unimplemented")
+	}
+	// TODO Last-Modified-Since
+	// TODO If-Modified-Since
+	// TODO If-Unmodified-Since
+	// TODO If-Match
+	// TODO If-None-Match
+	// TODO Connection: close ??
+	// TODO x-amz-request-id
+	h.Set("Content-Length", fmt.Sprint(len(obj.data)))
+	// NOTE(review): ETag is sent unquoted here, while s3Key() quotes
+	// it in listings — confirm whether clients depend on the quotes.
+	h.Set("ETag", hex.EncodeToString(obj.checksum))
+	h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
+	if a.req.Method == "HEAD" {
+		return nil
+	}
+	// TODO avoid holding the lock when writing data.
+	_, err := a.w.Write(obj.data)
+	if err != nil {
+		// we can't do much except just log the fact.
+		log.Printf("error writing data: %v", err)
+	}
+	return nil
+}
+
+// metaHeaders are the request headers stored with an object on PUT
+// (in addition to any X-Amz-Meta-* header) and echoed back on GET.
+var metaHeaders = map[string]bool{
+	"Content-MD5":         true,
+	"x-amz-acl":           true,
+	"Content-Type":        true,
+	"Content-Encoding":    true,
+	"Content-Disposition": true,
+}
+
+// PUT on an object creates the object.
+// The body is read fully while computing its MD5; if the client sent
+// Content-MD5 or Content-Length they are validated against what was
+// received before the object (data + selected metadata) is stored.
+func (objr objectResource) put(a *action) interface{} {
+	// TODO Cache-Control header
+	// TODO Expires header
+	// TODO x-amz-server-side-encryption
+	// TODO x-amz-storage-class
+
+	// TODO is this correct, or should we erase all previous metadata?
+	obj := objr.object
+	if obj == nil {
+		obj = &object{
+			name: objr.name,
+			meta: make(http.Header),
+		}
+	}
+
+	var expectHash []byte
+	if c := a.req.Header.Get("Content-MD5"); c != "" {
+		var err error
+		expectHash, err = base64.StdEncoding.DecodeString(c)
+		if err != nil || len(expectHash) != md5.Size {
+			fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
+		}
+	}
+	sum := md5.New()
+	// TODO avoid holding lock while reading data.
+	data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
+	if err != nil {
+		fatalf(400, "TODO", "read error")
+	}
+	gotHash := sum.Sum(nil)
+	// Idiomatic equality check (was: bytes.Compare(...) != 0).
+	if expectHash != nil && !bytes.Equal(gotHash, expectHash) {
+		fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
+	}
+	if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
+		fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
+	}
+
+	// PUT request has been successful - save data and metadata
+	for key, values := range a.req.Header {
+		key = http.CanonicalHeaderKey(key)
+		if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
+			obj.meta[key] = values
+		}
+	}
+	obj.data = data
+	obj.checksum = gotHash
+	obj.mtime = time.Now()
+	objr.bucket.objects[objr.name] = obj
+	return nil
+}
+
+// DELETE on an object removes it; deleting a missing key is a no-op,
+// as on real S3.
+func (objr objectResource) delete(a *action) interface{} {
+	delete(objr.bucket.objects, objr.name)
+	return nil
+}
+
+// POST on an object is not supported by this fake server.
+func (objr objectResource) post(a *action) interface{} {
+	fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
+	return nil
+}
+
+// CreateBucketConfiguration mirrors the XML body of a bucket PUT.
+type CreateBucketConfiguration struct {
+	LocationConstraint string
+}
+
+// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
+// If there is no body, an empty string will be returned.
+func locationConstraint(a *action) string {
+	var body bytes.Buffer
+	if _, err := io.Copy(&body, a.req.Body); err != nil {
+		fatalf(400, "InvalidRequest", err.Error())
+	}
+	if body.Len() == 0 {
+		return ""
+	}
+	var loc CreateBucketConfiguration
+	if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
+		fatalf(400, "InvalidRequest", err.Error())
+	}
+	return loc.LocationConstraint
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go
new file mode 100644
index 000000000..c8e57a2f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go
@@ -0,0 +1,114 @@
+package s3
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "github.com/goamz/goamz/aws"
+ "log"
+ "sort"
+ "strings"
+)
+
+// b64 is shorthand for the base64 alphabet used to encode HMAC signatures.
+var b64 = base64.StdEncoding
+
+// ----------------------------------------------------------------------------
+// S3 signing (http://goo.gl/G1LrK)
+
+// s3ParamsToSign is the set of query-string parameters (sub-resources and
+// response-header overrides) that participate in the canonicalized resource
+// portion of the string-to-sign; all other query parameters are ignored when
+// signing (see sign below).
+var s3ParamsToSign = map[string]bool{
+	"acl":                          true,
+	"location":                     true,
+	"logging":                      true,
+	"notification":                 true,
+	"partNumber":                   true,
+	"policy":                       true,
+	"requestPayment":               true,
+	"torrent":                      true,
+	"uploadId":                     true,
+	"uploads":                      true,
+	"versionId":                    true,
+	"versioning":                   true,
+	"versions":                     true,
+	"response-content-type":        true,
+	"response-content-language":    true,
+	"response-expires":             true,
+	"response-cache-control":       true,
+	"response-content-disposition": true,
+	"response-content-encoding":    true,
+	"website":                      true,
+	"delete":                       true,
+}
+
+// sign computes the AWS S3 (signature v2) request signature and attaches it
+// to the request. For header-based authentication it sets the Authorization
+// header in headers; when params contains "Expires" it switches to the
+// query-string authentication alternative, adding AWSAccessKeyId and
+// Signature entries to params instead. Both maps are mutated in place.
+func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
+	var md5, ctype, date, xamz string
+	var xamzDate bool
+	var sarray []string
+	// Gather the standard headers plus the canonicalized x-amz-* headers
+	// that take part in the string-to-sign. Header keys are matched
+	// case-insensitively.
+	for k, v := range headers {
+		k = strings.ToLower(k)
+		switch k {
+		case "content-md5":
+			md5 = v[0]
+		case "content-type":
+			ctype = v[0]
+		case "date":
+			if !xamzDate {
+				date = v[0]
+			}
+		default:
+			if strings.HasPrefix(k, "x-amz-") {
+				vall := strings.Join(v, ",")
+				sarray = append(sarray, k+":"+vall)
+				if k == "x-amz-date" {
+					// x-amz-date supersedes Date: the Date slot in
+					// the string-to-sign is left empty.
+					xamzDate = true
+					date = ""
+				}
+			}
+		}
+	}
+	if len(sarray) > 0 {
+		sort.Strings(sarray)
+		xamz = strings.Join(sarray, "\n") + "\n"
+	}
+
+	expires := false
+	if v, ok := params["Expires"]; ok {
+		// Query string request authentication alternative.
+		expires = true
+		date = v[0]
+		params["AWSAccessKeyId"] = []string{auth.AccessKey}
+	}
+
+	// Append the signed sub-resources/response overrides to the canonical
+	// path, sorted lexicographically. sarray is reused to avoid a fresh
+	// allocation.
+	sarray = sarray[0:0]
+	for k, v := range params {
+		if s3ParamsToSign[k] {
+			for _, vi := range v {
+				if vi == "" {
+					sarray = append(sarray, k)
+				} else {
+					// "When signing you do not encode these values."
+					sarray = append(sarray, k+"="+vi)
+				}
+			}
+		}
+	}
+	if len(sarray) > 0 {
+		sort.Strings(sarray)
+		canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
+	}
+
+	payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
+	hash := hmac.New(sha1.New, []byte(auth.SecretKey))
+	hash.Write([]byte(payload))
+	signature := make([]byte, b64.EncodedLen(hash.Size()))
+	b64.Encode(signature, hash.Sum(nil))
+
+	if expires {
+		params["Signature"] = []string{string(signature)}
+	} else {
+		headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
+	}
+	if debug {
+		log.Printf("Signature payload: %q", payload)
+		log.Printf("Signature: %q", signature)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go
new file mode 100644
index 000000000..112e1ca3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go
@@ -0,0 +1,132 @@
+package s3_test
+
+import (
+ "github.com/goamz/goamz/aws"
+ "github.com/goamz/goamz/s3"
+ . "gopkg.in/check.v1"
+)
+
+// S3 ReST authentication docs: http://goo.gl/G1LrK
+
+// testAuth is the example access key pair used by the signing examples in
+// the S3 documentation referenced above.
+var testAuth = aws.Auth{AccessKey: "0PN5J17HBGZHT7JJ3X82", SecretKey: "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"}
+
+// Checks the Authorization header produced for the docs' object GET example.
+func (s *S) TestSignExampleObjectGet(c *C) {
+	headers := map[string][]string{
+		"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
+		"Host": {"johnsmith.s3.amazonaws.com"},
+	}
+	s3.Sign(testAuth, "GET", "/johnsmith/photos/puppy.jpg", nil, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="})
+}
+
+// Checks the Authorization header produced for the docs' object PUT example.
+func (s *S) TestSignExampleObjectPut(c *C) {
+	headers := map[string][]string{
+		"Host":           {"johnsmith.s3.amazonaws.com"},
+		"Date":           {"Tue, 27 Mar 2007 21:15:45 +0000"},
+		"Content-Type":   {"image/jpeg"},
+		"Content-Length": {"94328"},
+	}
+	s3.Sign(testAuth, "PUT", "/johnsmith/photos/puppy.jpg", nil, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="})
+}
+
+// Bucket listing: prefix/max-keys/marker are not in s3ParamsToSign, so the
+// signature must come out as if no query parameters were present.
+func (s *S) TestSignExampleList(c *C) {
+	params := map[string][]string{
+		"prefix":   {"photos"},
+		"max-keys": {"50"},
+		"marker":   {"puppy"},
+	}
+	headers := map[string][]string{
+		"Host":       {"johnsmith.s3.amazonaws.com"},
+		"Date":       {"Tue, 27 Mar 2007 19:42:41 +0000"},
+		"User-Agent": {"Mozilla/5.0"},
+	}
+	s3.Sign(testAuth, "GET", "/johnsmith/", params, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="})
+}
+
+// The "acl" sub-resource (empty value) is part of the canonical resource
+// and must be included in the signature.
+func (s *S) TestSignExampleFetch(c *C) {
+	params := map[string][]string{"acl": {""}}
+	headers := map[string][]string{
+		"Host": {"johnsmith.s3.amazonaws.com"},
+		"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
+	}
+	s3.Sign(testAuth, "GET", "/johnsmith/", params, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="})
+}
+
+// DELETE carrying an x-amz-date header, which supersedes Date in the
+// string-to-sign.
+func (s *S) TestSignExampleDelete(c *C) {
+	headers := map[string][]string{
+		"Host":       {"s3.amazonaws.com"},
+		"Date":       {"Tue, 27 Mar 2007 21:20:27 +0000"},
+		"User-Agent": {"dotnet"},
+		"x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
+	}
+	s3.Sign(testAuth, "DELETE", "/johnsmith/photos/puppy.jpg", map[string][]string{}, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="})
+}
+
+// PUT with Content-MD5 and several x-amz-* metadata headers; header keys
+// are signed case-insensitively, so the mixed casing here must not matter.
+func (s *S) TestSignExampleUpload(c *C) {
+	headers := map[string][]string{
+		"Host":                         {"static.johnsmith.net:8080"},
+		"Date":                         {"Tue, 27 Mar 2007 21:06:08 +0000"},
+		"User-Agent":                   {"curl/7.15.5"},
+		"x-amz-acl":                    {"public-read"},
+		"content-type":                 {"application/x-download"},
+		"Content-MD5":                  {"4gJE4saaMU4BqNR0kLY+lw=="},
+		"X-Amz-Meta-ReviewedBy":        {"joe@johnsmith.net,jane@johnsmith.net"},
+		"X-Amz-Meta-FileChecksum":      {"0x02661779"},
+		"X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
+		"Content-Disposition":          {"attachment; filename=database.dat"},
+		"Content-Encoding":             {"gzip"},
+		"Content-Length":               {"5913339"},
+	}
+	s3.Sign(testAuth, "PUT", "/static.johnsmith.net/db-backup.dat.gz", map[string][]string{}, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="})
+}
+
+// GET on the service root (list all buckets).
+func (s *S) TestSignExampleListAllMyBuckets(c *C) {
+	headers := map[string][]string{
+		"Host": {"s3.amazonaws.com"},
+		"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
+	}
+	s3.Sign(testAuth, "GET", "/", nil, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="})
+}
+
+// The percent-encoded path is already canonical and is signed exactly as
+// given (mixed-case escapes included).
+func (s *S) TestSignExampleUnicodeKeys(c *C) {
+	headers := map[string][]string{
+		"Host": {"s3.amazonaws.com"},
+		"Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
+	}
+	s3.Sign(testAuth, "GET", "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re", nil, headers)
+	c.Assert(headers["Authorization"], DeepEquals,
+		[]string{"AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="})
+}