author    Christopher Speller <crspeller@gmail.com>  2016-05-12 15:08:58 -0400
committer Christopher Speller <crspeller@gmail.com>  2016-05-12 16:37:29 -0400
commit    84d2482ddbff9564c9ad75b2d30af66e3ddfd44d (patch)
tree      8bfa567d2b6381f4a996ada2deff8a16aa85a3ac /Godeps/_workspace/src/github.com/goamz
parent    d1efb66ad7b017f0fbfe6f0c20843b30f396e504 (diff)
Updating go dependencies. Switching to go1.6 vendoring (#2949)
Diffstat (limited to 'Godeps/_workspace/src/github.com/goamz')
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/LICENSE             |  185
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go      |   74
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go          |  432
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go       |  124
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go      |  254
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go         |  357
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go         |  439
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go            | 1155
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go |  633
-rw-r--r--  Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go          |  141
10 files changed, 0 insertions, 3794 deletions
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/LICENSE b/Godeps/_workspace/src/github.com/goamz/goamz/LICENSE
deleted file mode 100644
index 53320c352..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/LICENSE
+++ /dev/null
@@ -1,185 +0,0 @@
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go
deleted file mode 100644
index c0654f5d8..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package aws
-
-import (
- "time"
-)
-
-// AttemptStrategy represents a strategy for waiting for an action
-// to complete successfully. This is an internal type used by the
-// implementation of other goamz packages.
-type AttemptStrategy struct {
- Total time.Duration // total duration of attempt.
- Delay time.Duration // interval between each try in the burst.
- Min int // minimum number of retries; overrides Total
-}
-
-type Attempt struct {
- strategy AttemptStrategy
- last time.Time
- end time.Time
- force bool
- count int
-}
-
-// Start begins a new sequence of attempts for the given strategy.
-func (s AttemptStrategy) Start() *Attempt {
- now := time.Now()
- return &Attempt{
- strategy: s,
- last: now,
- end: now.Add(s.Total),
- force: true,
- }
-}
-
-// Next waits until it is time to perform the next attempt or returns
-// false if it is time to stop trying.
-func (a *Attempt) Next() bool {
- now := time.Now()
- sleep := a.nextSleep(now)
- if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
- return false
- }
- a.force = false
- if sleep > 0 && a.count > 0 {
- time.Sleep(sleep)
- now = time.Now()
- }
- a.count++
- a.last = now
- return true
-}
-
-func (a *Attempt) nextSleep(now time.Time) time.Duration {
- sleep := a.strategy.Delay - now.Sub(a.last)
- if sleep < 0 {
- return 0
- }
- return sleep
-}
-
-// HasNext returns whether another attempt will be made if the current
-// one fails. If it returns true, the following call to Next is
-// guaranteed to return true.
-func (a *Attempt) HasNext() bool {
- if a.force || a.strategy.Min > a.count {
- return true
- }
- now := time.Now()
- if now.Add(a.nextSleep(now)).Before(a.end) {
- a.force = true
- return true
- }
- return false
-}
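
A minimal usage sketch of the deleted AttemptStrategy API; flakyCall is a hypothetical stand-in for an operation with transient failures, and the loop shape mirrors the retry loops in s3/multi.go further down:

    package main

    import (
        "errors"
        "fmt"
        "time"

        "github.com/goamz/goamz/aws"
    )

    var calls int

    // flakyCall is a hypothetical operation that fails twice before succeeding.
    func flakyCall() error {
        calls++
        if calls < 3 {
            return errors.New("temporary failure")
        }
        return nil
    }

    func main() {
        // Retry for up to 10 seconds total, sleeping 200ms between tries,
        // with at least 3 attempts even if Total is already exhausted.
        strategy := aws.AttemptStrategy{
            Total: 10 * time.Second,
            Delay: 200 * time.Millisecond,
            Min:   3,
        }
        var err error
        for attempt := strategy.Start(); attempt.Next(); {
            if err = flakyCall(); err == nil {
                break
            }
        }
        fmt.Printf("result: %v after %d calls\n", err, calls)
    }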
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go
deleted file mode 100644
index 77bf563d6..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go
+++ /dev/null
@@ -1,432 +0,0 @@
-//
-// goamz - Go packages to interact with the Amazon Web Services.
-//
-// https://wiki.ubuntu.com/goamz
-//
-// Copyright (c) 2011 Canonical Ltd.
-//
-// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
-//
-package aws
-
-import (
- "encoding/json"
- "encoding/xml"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "time"
-
- "github.com/vaughan0/go-ini"
-)
-
-// Defines the valid signers
-const (
- V2Signature = iota
- V4Signature = iota
- Route53Signature = iota
-)
-
-// Defines the service endpoint and correct Signer implementation to use
-// to sign requests for this endpoint
-type ServiceInfo struct {
- Endpoint string
- Signer uint
-}
-
-// Region defines the URLs where AWS services may be accessed.
-//
-// See http://goo.gl/d8BP1 for more details.
-type Region struct {
- Name string // the canonical name of this region.
- EC2Endpoint string
- S3Endpoint string
- S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name.
- S3LocationConstraint bool // true if this region requires a LocationConstraint declaration.
- S3LowercaseBucket bool // true if the region requires bucket names to be lower case.
- SDBEndpoint string
- SESEndpoint string
- SNSEndpoint string
- SQSEndpoint string
- IAMEndpoint string
- ELBEndpoint string
- DynamoDBEndpoint string
- CloudWatchServicepoint ServiceInfo
- AutoScalingEndpoint string
- RDSEndpoint ServiceInfo
- STSEndpoint string
- CloudFormationEndpoint string
- ECSEndpoint string
- DynamoDBStreamsEndpoint string
-}
-
-var Regions = map[string]Region{
- APNortheast.Name: APNortheast,
- APSoutheast.Name: APSoutheast,
- APSoutheast2.Name: APSoutheast2,
- EUCentral.Name: EUCentral,
- EUWest.Name: EUWest,
- USEast.Name: USEast,
- USWest.Name: USWest,
- USWest2.Name: USWest2,
- USGovWest.Name: USGovWest,
- SAEast.Name: SAEast,
- CNNorth.Name: CNNorth,
-}
-
-// Signer designates an interface suitable for signing AWS requests; params
-// should be appropriately encoded for the request before signing.
-//
-// A signer should be initialized with Auth and the appropriate endpoint.
-type Signer interface {
- Sign(method, path string, params map[string]string)
-}
-
-// An AWS Service interface with the API to query the AWS service
-//
-// Supplied as an easy way to mock out service calls during testing.
-type AWSService interface {
- // Queries the AWS service at a given method/path with the params and
- // returns an http.Response and error
- Query(method, path string, params map[string]string) (*http.Response, error)
- // Builds an error given an XML payload in the http.Response, can be used
- // to process an error if the status code is not 200 for example.
- BuildError(r *http.Response) error
-}
-
-// Implements a Server Query/Post API to easily query AWS services and build
-// errors when desired
-type Service struct {
- service ServiceInfo
- signer Signer
-}
-
-// Create a base set of params for an action
-func MakeParams(action string) map[string]string {
- params := make(map[string]string)
- params["Action"] = action
- return params
-}
-
-// Create a new AWS server to handle making requests
-func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
- var signer Signer
- switch service.Signer {
- case V2Signature:
- signer, err = NewV2Signer(auth, service)
- // case V4Signature:
- // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
- default:
- err = fmt.Errorf("Unsupported signer for service")
- }
- if err != nil {
- return
- }
- s = &Service{service: service, signer: signer}
- return
-}
-
-func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
- params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
- u, err := url.Parse(s.service.Endpoint)
- if err != nil {
- return nil, err
- }
- u.Path = path
-
- s.signer.Sign(method, path, params)
- if method == "GET" {
- u.RawQuery = multimap(params).Encode()
- resp, err = http.Get(u.String())
- } else if method == "POST" {
- resp, err = http.PostForm(u.String(), multimap(params))
- }
-
- return
-}
-
-func (s *Service) BuildError(r *http.Response) error {
- errors := ErrorResponse{}
- xml.NewDecoder(r.Body).Decode(&errors)
- var err Error
- err = errors.Errors
- err.RequestId = errors.RequestId
- err.StatusCode = r.StatusCode
- if err.Message == "" {
- err.Message = r.Status
- }
- return &err
-}
-
-type ErrorResponse struct {
- Errors Error `xml:"Error"`
- RequestId string // A unique ID for tracking the request
-}
-
-type Error struct {
- StatusCode int
- Type string
- Code string
- Message string
- RequestId string
-}
-
-func (err *Error) Error() string {
- return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
- err.Type, err.Code, err.Message,
- )
-}
-
-type Auth struct {
- AccessKey, SecretKey string
- token string
- expiration time.Time
-}
-
-func (a *Auth) Token() string {
- if a.token == "" {
- return ""
- }
-	if time.Since(a.expiration) >= -30*time.Second { // in an ideal world this should be zero, assuming the instance is syncing its clock
- *a, _ = GetAuth("", "", "", time.Time{})
- }
- return a.token
-}
-
-func (a *Auth) Expiration() time.Time {
- return a.expiration
-}
-
-// To be used with other APIs that return auth credentials such as STS
-func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
- return &Auth{
- AccessKey: accessKey,
- SecretKey: secretKey,
- token: token,
- expiration: expiration,
- }
-}
-
-// ResponseMetadata
-type ResponseMetadata struct {
- RequestId string // A unique ID for tracking the request
-}
-
-type BaseResponse struct {
- ResponseMetadata ResponseMetadata
-}
-
-var unreserved = make([]bool, 128)
-var hex = "0123456789ABCDEF"
-
-func init() {
- // RFC3986
- u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
- for _, c := range u {
- unreserved[c] = true
- }
-}
-
-func multimap(p map[string]string) url.Values {
- q := make(url.Values, len(p))
- for k, v := range p {
- q[k] = []string{v}
- }
- return q
-}
-
-type credentials struct {
- Code string
- LastUpdated string
- Type string
- AccessKeyId string
- SecretAccessKey string
- Token string
- Expiration string
-}
-
-// GetMetaData retrieves instance metadata about the current machine.
-//
-// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
-func GetMetaData(path string) (contents []byte, err error) {
- url := "http://169.254.169.254/latest/meta-data/" + path
-
- resp, err := RetryingClient.Get(url)
- if err != nil {
- return
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != 200 {
- err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
- return
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return
- }
- return []byte(body), err
-}
-
-func getInstanceCredentials() (cred credentials, err error) {
- credentialPath := "iam/security-credentials/"
-
- // Get the instance role
- role, err := GetMetaData(credentialPath)
- if err != nil {
- return
- }
-
- // Get the instance role credentials
- credentialJSON, err := GetMetaData(credentialPath + string(role))
- if err != nil {
- return
- }
-
- err = json.Unmarshal([]byte(credentialJSON), &cred)
- return
-}
-
-// GetAuth creates an Auth based on either passed in credentials,
-// environment information or instance based role credentials.
-func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
- // First try passed in credentials
- if accessKey != "" && secretKey != "" {
- return Auth{accessKey, secretKey, token, expiration}, nil
- }
-
- // Next try to get auth from the shared credentials file
- auth, err = SharedAuth()
- if err == nil {
- // Found auth, return
- return
- }
-
- // Next try to get auth from the environment
- auth, err = EnvAuth()
- if err == nil {
- // Found auth, return
- return
- }
-
- // Next try getting auth from the instance role
- cred, err := getInstanceCredentials()
- if err == nil {
- // Found auth, return
- auth.AccessKey = cred.AccessKeyId
- auth.SecretKey = cred.SecretAccessKey
- auth.token = cred.Token
- exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
- if err != nil {
-			err = fmt.Errorf("Error parsing expiration date: cred.Expiration: %s, error: %s", cred.Expiration, err)
- }
- auth.expiration = exptdate
- return auth, err
- }
- err = errors.New("No valid AWS authentication found")
- return auth, err
-}
-
-// EnvAuth creates an Auth based on environment information.
-// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
-// variables are used.
-// AWS_SESSION_TOKEN is used if present.
-func EnvAuth() (auth Auth, err error) {
- auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
- if auth.AccessKey == "" {
- auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
- }
-
- auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
- if auth.SecretKey == "" {
- auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
- }
- if auth.AccessKey == "" {
- err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
- }
- if auth.SecretKey == "" {
- err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
- }
-
- auth.token = os.Getenv("AWS_SESSION_TOKEN")
- return
-}
-
-// SharedAuth creates an Auth based on shared credentials stored in
-// $HOME/.aws/credentials. The AWS_PROFILE environment variables is used to
-// select the profile.
-func SharedAuth() (auth Auth, err error) {
- var profileName = os.Getenv("AWS_PROFILE")
-
- if profileName == "" {
- profileName = "default"
- }
-
- var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE")
- if credentialsFile == "" {
- var homeDir = os.Getenv("HOME")
- if homeDir == "" {
- err = errors.New("Could not get HOME")
- return
- }
- credentialsFile = homeDir + "/.aws/credentials"
- }
-
- file, err := ini.LoadFile(credentialsFile)
- if err != nil {
- err = errors.New("Couldn't parse AWS credentials file")
- return
- }
-
- var profile = file[profileName]
- if profile == nil {
- err = errors.New("Couldn't find profile in AWS credentials file")
- return
- }
-
- auth.AccessKey = profile["aws_access_key_id"]
- auth.SecretKey = profile["aws_secret_access_key"]
-
- if auth.AccessKey == "" {
-		err = errors.New("aws_access_key_id not found in credentials file")
- }
- if auth.SecretKey == "" {
- err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file")
- }
- return
-}
-
-// Encode takes a string and URI-encodes it in a way suitable
-// to be used in AWS signatures.
-func Encode(s string) string {
- encode := false
- for i := 0; i != len(s); i++ {
- c := s[i]
- if c > 127 || !unreserved[c] {
- encode = true
- break
- }
- }
- if !encode {
- return s
- }
- e := make([]byte, len(s)*3)
- ei := 0
- for i := 0; i != len(s); i++ {
- c := s[i]
- if c > 127 || !unreserved[c] {
- e[ei] = '%'
- e[ei+1] = hex[c>>4]
- e[ei+2] = hex[c&0xF]
- ei += 3
- } else {
- e[ei] = c
- ei += 1
- }
- }
- return string(e[:ei])
-}
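
A short sketch of the credential-resolution chain GetAuth implements above; the empty arguments deliberately force the fallback order (shared credentials file, then environment, then instance role), and the action name passed to MakeParams is illustrative only:

    package main

    import (
        "fmt"
        "time"

        "github.com/goamz/goamz/aws"
    )

    func main() {
        // With empty keys, GetAuth falls through SharedAuth, EnvAuth and
        // finally the EC2 instance-role metadata endpoint.
        auth, err := aws.GetAuth("", "", "", time.Time{})
        if err != nil {
            fmt.Println("no credentials found:", err)
            return
        }
        fmt.Println("resolved access key:", auth.AccessKey)

        // MakeParams seeds the Action parameter for a Query/Post API call.
        params := aws.MakeParams("DescribeInstances")
        fmt.Println(params)

        // Encode percent-escapes a value the way AWS signatures expect
        // (the V2 signer applies it to every key and value itself).
        fmt.Println(aws.Encode("value with spaces/slashes"))
    }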
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go
deleted file mode 100644
index 86d2ccec8..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package aws
-
-import (
- "math"
- "net"
- "net/http"
- "time"
-)
-
-type RetryableFunc func(*http.Request, *http.Response, error) bool
-type WaitFunc func(try int)
-type DeadlineFunc func() time.Time
-
-type ResilientTransport struct {
- // Timeout is the maximum amount of time a dial will wait for
- // a connect to complete.
- //
- // The default is no timeout.
- //
- // With or without a timeout, the operating system may impose
- // its own earlier timeout. For instance, TCP timeouts are
- // often around 3 minutes.
- DialTimeout time.Duration
-
- // MaxTries, if non-zero, specifies the number of times we will retry on
- // failure. Retries are only attempted for temporary network errors or known
- // safe failures.
- MaxTries int
- Deadline DeadlineFunc
- ShouldRetry RetryableFunc
- Wait WaitFunc
- transport *http.Transport
-}
-
-// Convenience method for creating an http client
-func NewClient(rt *ResilientTransport) *http.Client {
- rt.transport = &http.Transport{
- Dial: func(netw, addr string) (net.Conn, error) {
- c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
- if err != nil {
- return nil, err
- }
- c.SetDeadline(rt.Deadline())
- return c, nil
- },
- Proxy: http.ProxyFromEnvironment,
- }
-	// TODO: Would be nice if ResilientTransport allowed clients to initialize
- // with http.Transport attributes.
- return &http.Client{
- Transport: rt,
- }
-}
-
-var retryingTransport = &ResilientTransport{
- Deadline: func() time.Time {
- return time.Now().Add(5 * time.Second)
- },
- DialTimeout: 10 * time.Second,
- MaxTries: 3,
- ShouldRetry: awsRetry,
- Wait: ExpBackoff,
-}
-
-// Exported default client
-var RetryingClient = NewClient(retryingTransport)
-
-func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- return t.tries(req)
-}
-
-// Retry a request a maximum of t.MaxTries times.
-// We'll only retry if the proper criteria are met.
-// If a wait function is specified, wait that amount of time
-// In between requests.
-func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
- for try := 0; try < t.MaxTries; try += 1 {
- res, err = t.transport.RoundTrip(req)
-
- if !t.ShouldRetry(req, res, err) {
- break
- }
- if res != nil {
- res.Body.Close()
- }
- if t.Wait != nil {
- t.Wait(try)
- }
- }
-
- return
-}
-
-func ExpBackoff(try int) {
- time.Sleep(100 * time.Millisecond *
- time.Duration(math.Exp2(float64(try))))
-}
-
-func LinearBackoff(try int) {
- time.Sleep(time.Duration(try*100) * time.Millisecond)
-}
-
-// Decide if we should retry a request.
-// In general, the criteria for retrying a request is described here
-// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
-func awsRetry(req *http.Request, res *http.Response, err error) bool {
- retry := false
-
- // Retry if there's a temporary network error.
- if neterr, ok := err.(net.Error); ok {
- if neterr.Temporary() {
- retry = true
- }
- }
-
- // Retry if we get a 5xx series error.
- if res != nil {
- if res.StatusCode >= 500 && res.StatusCode < 600 {
- retry = true
- }
- }
-
- return retry
-}
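
A sketch of the retry client above: RetryingClient is the exported default (3 tries, ExpBackoff sleeping 100ms * 2^try, retrying temporary network errors and 5xx responses), while the custom transport shows the available knobs. The ShouldRetry closure here is a simplified stand-in for awsRetry, and the URL is illustrative:

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/goamz/goamz/aws"
    )

    func main() {
        // The exported default client.
        resp, err := aws.RetryingClient.Get("https://example.com/")
        if err != nil {
            fmt.Println("request failed after retries:", err)
        } else {
            resp.Body.Close()
        }

        // A custom transport: 5 tries with linear (try * 100ms) backoff,
        // retrying only on 5xx responses.
        rt := &aws.ResilientTransport{
            Deadline:    func() time.Time { return time.Now().Add(5 * time.Second) },
            DialTimeout: 10 * time.Second,
            MaxTries:    5,
            Wait:        aws.LinearBackoff,
            ShouldRetry: func(req *http.Request, res *http.Response, err error) bool {
                return res != nil && res.StatusCode >= 500 && res.StatusCode < 600
            },
        }
        client := aws.NewClient(rt)
        _ = client // use like any *http.Client
    }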
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go
deleted file mode 100644
index 5e18f023d..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package aws
-
-var USGovWest = Region{
- "us-gov-west-1",
- "https://ec2.us-gov-west-1.amazonaws.com",
- "https://s3-fips-us-gov-west-1.amazonaws.com",
- "",
- true,
- true,
- "",
- "",
- "https://sns.us-gov-west-1.amazonaws.com",
- "https://sqs.us-gov-west-1.amazonaws.com",
- "https://iam.us-gov.amazonaws.com",
- "https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
- "https://dynamodb.us-gov-west-1.amazonaws.com",
- ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
- "https://autoscaling.us-gov-west-1.amazonaws.com",
- ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.us-gov-west-1.amazonaws.com",
- "https://ecs.us-gov-west-1.amazonaws.com",
- "https://streams.dynamodb.us-gov-west-1.amazonaws.com",
-}
-
-var USEast = Region{
- "us-east-1",
- "https://ec2.us-east-1.amazonaws.com",
- "https://s3.amazonaws.com",
- "",
- false,
- false,
- "https://sdb.amazonaws.com",
- "https://email.us-east-1.amazonaws.com",
- "https://sns.us-east-1.amazonaws.com",
- "https://sqs.us-east-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.us-east-1.amazonaws.com",
- "https://dynamodb.us-east-1.amazonaws.com",
- ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
- "https://autoscaling.us-east-1.amazonaws.com",
- ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.us-east-1.amazonaws.com",
- "https://ecs.us-east-1.amazonaws.com",
- "https://streams.dynamodb.us-east-1.amazonaws.com",
-}
-
-var USWest = Region{
- "us-west-1",
- "https://ec2.us-west-1.amazonaws.com",
- "https://s3-us-west-1.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.us-west-1.amazonaws.com",
- "",
- "https://sns.us-west-1.amazonaws.com",
- "https://sqs.us-west-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.us-west-1.amazonaws.com",
- "https://dynamodb.us-west-1.amazonaws.com",
- ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
- "https://autoscaling.us-west-1.amazonaws.com",
- ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.us-west-1.amazonaws.com",
- "https://ecs.us-west-1.amazonaws.com",
- "https://streams.dynamodb.us-west-1.amazonaws.com",
-}
-
-var USWest2 = Region{
- "us-west-2",
- "https://ec2.us-west-2.amazonaws.com",
- "https://s3-us-west-2.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.us-west-2.amazonaws.com",
- "https://email.us-west-2.amazonaws.com",
- "https://sns.us-west-2.amazonaws.com",
- "https://sqs.us-west-2.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.us-west-2.amazonaws.com",
- "https://dynamodb.us-west-2.amazonaws.com",
- ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
- "https://autoscaling.us-west-2.amazonaws.com",
- ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.us-west-2.amazonaws.com",
- "https://ecs.us-west-2.amazonaws.com",
- "https://streams.dynamodb.us-west-2.amazonaws.com",
-}
-
-var EUWest = Region{
- "eu-west-1",
- "https://ec2.eu-west-1.amazonaws.com",
- "https://s3-eu-west-1.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.eu-west-1.amazonaws.com",
- "https://email.eu-west-1.amazonaws.com",
- "https://sns.eu-west-1.amazonaws.com",
- "https://sqs.eu-west-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.eu-west-1.amazonaws.com",
- "https://dynamodb.eu-west-1.amazonaws.com",
- ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
- "https://autoscaling.eu-west-1.amazonaws.com",
- ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.eu-west-1.amazonaws.com",
- "https://ecs.eu-west-1.amazonaws.com",
- "https://streams.dynamodb.eu-west-1.amazonaws.com",
-}
-
-var EUCentral = Region{
- "eu-central-1",
- "https://ec2.eu-central-1.amazonaws.com",
- "https://s3-eu-central-1.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.eu-central-1.amazonaws.com",
- "https://email.eu-central-1.amazonaws.com",
- "https://sns.eu-central-1.amazonaws.com",
- "https://sqs.eu-central-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.eu-central-1.amazonaws.com",
- "https://dynamodb.eu-central-1.amazonaws.com",
- ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
- "https://autoscaling.eu-central-1.amazonaws.com",
- ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.eu-central-1.amazonaws.com",
- "https://ecs.eu-central-1.amazonaws.com",
- "https://streams.dynamodb.eu-central-1.amazonaws.com",
-}
-
-var APSoutheast = Region{
- "ap-southeast-1",
- "https://ec2.ap-southeast-1.amazonaws.com",
- "https://s3-ap-southeast-1.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.ap-southeast-1.amazonaws.com",
- "",
- "https://sns.ap-southeast-1.amazonaws.com",
- "https://sqs.ap-southeast-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
- "https://dynamodb.ap-southeast-1.amazonaws.com",
- ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
- "https://autoscaling.ap-southeast-1.amazonaws.com",
- ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.ap-southeast-1.amazonaws.com",
- "https://ecs.ap-southeast-1.amazonaws.com",
- "https://streams.dynamodb.ap-southeast-1.amazonaws.com",
-}
-
-var APSoutheast2 = Region{
- "ap-southeast-2",
- "https://ec2.ap-southeast-2.amazonaws.com",
- "https://s3-ap-southeast-2.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.ap-southeast-2.amazonaws.com",
- "",
- "https://sns.ap-southeast-2.amazonaws.com",
- "https://sqs.ap-southeast-2.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
- "https://dynamodb.ap-southeast-2.amazonaws.com",
- ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
- "https://autoscaling.ap-southeast-2.amazonaws.com",
- ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.ap-southeast-2.amazonaws.com",
- "https://ecs.ap-southeast-2.amazonaws.com",
- "https://streams.dynamodb.ap-southeast-2.amazonaws.com",
-}
-
-var APNortheast = Region{
- "ap-northeast-1",
- "https://ec2.ap-northeast-1.amazonaws.com",
- "https://s3-ap-northeast-1.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.ap-northeast-1.amazonaws.com",
- "",
- "https://sns.ap-northeast-1.amazonaws.com",
- "https://sqs.ap-northeast-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
- "https://dynamodb.ap-northeast-1.amazonaws.com",
- ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
- "https://autoscaling.ap-northeast-1.amazonaws.com",
- ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.ap-northeast-1.amazonaws.com",
- "https://ecs.ap-northeast-1.amazonaws.com",
- "https://streams.dynamodb.ap-northeast-1.amazonaws.com",
-}
-
-var SAEast = Region{
- "sa-east-1",
- "https://ec2.sa-east-1.amazonaws.com",
- "https://s3-sa-east-1.amazonaws.com",
- "",
- true,
- true,
- "https://sdb.sa-east-1.amazonaws.com",
- "",
- "https://sns.sa-east-1.amazonaws.com",
- "https://sqs.sa-east-1.amazonaws.com",
- "https://iam.amazonaws.com",
- "https://elasticloadbalancing.sa-east-1.amazonaws.com",
- "https://dynamodb.sa-east-1.amazonaws.com",
- ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
- "https://autoscaling.sa-east-1.amazonaws.com",
- ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature},
- "https://sts.amazonaws.com",
- "https://cloudformation.sa-east-1.amazonaws.com",
- "https://ecs.sa-east-1.amazonaws.com",
- "https://streams.dynamodb.sa-east-1.amazonaws.com",
-}
-
-var CNNorth = Region{
- "cn-north-1",
- "https://ec2.cn-north-1.amazonaws.com.cn",
- "https://s3.cn-north-1.amazonaws.com.cn",
- "",
- true,
- true,
- "https://sdb.cn-north-1.amazonaws.com.cn",
- "",
- "https://sns.cn-north-1.amazonaws.com.cn",
- "https://sqs.cn-north-1.amazonaws.com.cn",
- "https://iam.cn-north-1.amazonaws.com.cn",
- "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
- "https://dynamodb.cn-north-1.amazonaws.com.cn",
- ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature},
- "https://autoscaling.cn-north-1.amazonaws.com.cn",
- ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature},
- "https://sts.cn-north-1.amazonaws.com.cn",
- "https://cloudformation.cn-north-1.amazonaws.com.cn",
- "https://ecs.cn-north-1.amazonaws.com.cn",
- "https://streams.dynamodb.cn-north-1.amazonaws.com.cn",
-}
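
A small lookup sketch for the Regions table defined above; everything referenced here is declared in this file:

    package main

    import (
        "fmt"

        "github.com/goamz/goamz/aws"
    )

    func main() {
        // Regions maps canonical names to per-service endpoint sets.
        region := aws.Regions["us-west-2"]
        fmt.Println("S3 endpoint:", region.S3Endpoint)
        fmt.Println("needs LocationConstraint:", region.S3LocationConstraint)

        // The predefined region vars can also be used directly.
        fmt.Println("EC2 endpoint:", aws.USEast.EC2Endpoint)
    }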
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go
deleted file mode 100644
index 5cf990525..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package aws
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "path"
- "sort"
- "strings"
- "time"
-)
-
-type V2Signer struct {
- auth Auth
- service ServiceInfo
- host string
-}
-
-var b64 = base64.StdEncoding
-
-func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) {
- u, err := url.Parse(service.Endpoint)
- if err != nil {
- return nil, err
- }
- return &V2Signer{auth: auth, service: service, host: u.Host}, nil
-}
-
-func (s *V2Signer) Sign(method, path string, params map[string]string) {
- params["AWSAccessKeyId"] = s.auth.AccessKey
- params["SignatureVersion"] = "2"
- params["SignatureMethod"] = "HmacSHA256"
- if s.auth.Token() != "" {
- params["SecurityToken"] = s.auth.Token()
- }
-
- // AWS specifies that the parameters in a signed request must
- // be provided in the natural order of the keys. This is distinct
- // from the natural order of the encoded value of key=value.
- // Percent and Equals affect the sorting order.
- var keys, sarray []string
- for k, _ := range params {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
- }
- joined := strings.Join(sarray, "&")
- payload := method + "\n" + s.host + "\n" + path + "\n" + joined
- hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
- hash.Write([]byte(payload))
- signature := make([]byte, b64.EncodedLen(hash.Size()))
- b64.Encode(signature, hash.Sum(nil))
-
- params["Signature"] = string(signature)
-}
-
-// Common date formats for signing requests
-const (
- ISO8601BasicFormat = "20060102T150405Z"
- ISO8601BasicFormatShort = "20060102"
-)
-
-type Route53Signer struct {
- auth Auth
-}
-
-func NewRoute53Signer(auth Auth) *Route53Signer {
- return &Route53Signer{auth: auth}
-}
-
-// getCurrentDate fetches the date stamp from the aws servers to
-// ensure the auth headers are within 5 minutes of the server time
-func (s *Route53Signer) getCurrentDate() string {
- response, err := http.Get("https://route53.amazonaws.com/date")
- if err != nil {
- fmt.Print("Unable to get date from amazon: ", err)
- return ""
- }
-
- response.Body.Close()
- return response.Header.Get("Date")
-}
-
-// Creates the authorize signature based on the date stamp and secret key
-func (s *Route53Signer) getHeaderAuthorize(message string) string {
- hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
- hmacSha256.Write([]byte(message))
- cryptedString := hmacSha256.Sum(nil)
-
- return base64.StdEncoding.EncodeToString(cryptedString)
-}
-
-// Adds all the required headers for AWS Route53 API to the request
-// including the authorization
-func (s *Route53Signer) Sign(req *http.Request) {
- date := s.getCurrentDate()
- authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
- s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))
-
- req.Header.Set("Host", req.Host)
- req.Header.Set("X-Amzn-Authorization", authHeader)
- req.Header.Set("X-Amz-Date", date)
- req.Header.Set("Content-Type", "application/xml")
-}
-
-/*
-The V4Signer encapsulates all of the functionality to sign a request with the AWS
-Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
-*/
-type V4Signer struct {
- auth Auth
- serviceName string
- region Region
-}
-
-/*
-Return a new instance of a V4Signer capable of signing AWS requests.
-*/
-func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
- return &V4Signer{auth: auth, serviceName: serviceName, region: region}
-}
-
-/*
-Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
-
-The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date"
-or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires
-the "host" header to be a signed header, therefor the Sign method will manually set a "host" header from
-the request.Host.
-
-The signed request will include a new "Authorization" header indicating that the request has been signed.
-
-Any changes to the request after signing the request will invalidate the signature.
-*/
-func (s *V4Signer) Sign(req *http.Request) {
- req.Header.Set("host", req.Host) // host header must be included as a signed header
-	t := s.requestTime(req)                           // Get request time
- creq := s.canonicalRequest(req) // Build canonical request
- sts := s.stringToSign(t, creq) // Build string to sign
- signature := s.signature(t, sts) // Calculate the AWS Signature Version 4
- auth := s.authorization(req.Header, t, signature) // Create Authorization header value
- req.Header.Set("Authorization", auth) // Add Authorization header to request
- return
-}
-
-/*
-requestTime method will parse the time from the request "x-amz-date" or "date" headers.
-If the "x-amz-date" header is present, that will take priority over the "date" header.
-If neither header is defined or we are unable to parse either header as a valid date
-then we will create a new "x-amz-date" header with the current time.
-*/
-func (s *V4Signer) requestTime(req *http.Request) time.Time {
-
- // Get "x-amz-date" header
- date := req.Header.Get("x-amz-date")
-
- // Attempt to parse as ISO8601BasicFormat
- t, err := time.Parse(ISO8601BasicFormat, date)
- if err == nil {
- return t
- }
-
- // Attempt to parse as http.TimeFormat
- t, err = time.Parse(http.TimeFormat, date)
- if err == nil {
- req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
- return t
- }
-
- // Get "date" header
- date = req.Header.Get("date")
-
- // Attempt to parse as http.TimeFormat
- t, err = time.Parse(http.TimeFormat, date)
- if err == nil {
- return t
- }
-
- // Create a current time header to be used
- t = time.Now().UTC()
- req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
- return t
-}
-
-/*
-canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. (http://goo.gl/eUUZ3S)
-
- CanonicalRequest =
- HTTPRequestMethod + '\n' +
- CanonicalURI + '\n' +
- CanonicalQueryString + '\n' +
- CanonicalHeaders + '\n' +
- SignedHeaders + '\n' +
- HexEncode(Hash(Payload))
-*/
-func (s *V4Signer) canonicalRequest(req *http.Request) string {
- c := new(bytes.Buffer)
- fmt.Fprintf(c, "%s\n", req.Method)
- fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL))
- fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL))
- fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header))
- fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header))
- fmt.Fprintf(c, "%s", s.payloadHash(req))
- return c.String()
-}
-
-func (s *V4Signer) canonicalURI(u *url.URL) string {
- canonicalPath := u.RequestURI()
- if u.RawQuery != "" {
- canonicalPath = canonicalPath[:len(canonicalPath)-len(u.RawQuery)-1]
- }
- slash := strings.HasSuffix(canonicalPath, "/")
- canonicalPath = path.Clean(canonicalPath)
- if canonicalPath != "/" && slash {
- canonicalPath += "/"
- }
- return canonicalPath
-}
-
-func (s *V4Signer) canonicalQueryString(u *url.URL) string {
- var a []string
- for k, vs := range u.Query() {
- k = url.QueryEscape(k)
- for _, v := range vs {
- if v == "" {
- a = append(a, k+"=")
- } else {
- v = url.QueryEscape(v)
- a = append(a, k+"="+v)
- }
- }
- }
- sort.Strings(a)
- return strings.Join(a, "&")
-}
-
-func (s *V4Signer) canonicalHeaders(h http.Header) string {
- i, a := 0, make([]string, len(h))
- for k, v := range h {
- for j, w := range v {
- v[j] = strings.Trim(w, " ")
- }
- sort.Strings(v)
- a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",")
- i++
- }
- sort.Strings(a)
- return strings.Join(a, "\n")
-}
-
-func (s *V4Signer) signedHeaders(h http.Header) string {
- i, a := 0, make([]string, len(h))
- for k, _ := range h {
- a[i] = strings.ToLower(k)
- i++
- }
- sort.Strings(a)
- return strings.Join(a, ";")
-}
-
-func (s *V4Signer) payloadHash(req *http.Request) string {
- var b []byte
- if req.Body == nil {
- b = []byte("")
- } else {
- var err error
- b, err = ioutil.ReadAll(req.Body)
- if err != nil {
- // TODO: I REALLY DON'T LIKE THIS PANIC!!!!
- panic(err)
- }
- }
- req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
- return s.hash(string(b))
-}
-
-/*
-stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu)
-
- StringToSign =
- Algorithm + '\n' +
- RequestDate + '\n' +
- CredentialScope + '\n' +
- HexEncode(Hash(CanonicalRequest))
-*/
-func (s *V4Signer) stringToSign(t time.Time, creq string) string {
- w := new(bytes.Buffer)
- fmt.Fprint(w, "AWS4-HMAC-SHA256\n")
- fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat))
- fmt.Fprintf(w, "%s\n", s.credentialScope(t))
- fmt.Fprintf(w, "%s", s.hash(creq))
- return w.String()
-}
-
-func (s *V4Signer) credentialScope(t time.Time) string {
- return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName)
-}
-
-/*
-signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1)
-
- signature = HexEncode(HMAC(derived-signing-key, string-to-sign))
-*/
-func (s *V4Signer) signature(t time.Time, sts string) string {
- h := s.hmac(s.derivedKey(t), []byte(sts))
- return fmt.Sprintf("%x", h)
-}
-
-/*
-derivedKey method derives a signing key to be used for signing a request.
-
- kSecret = Your AWS Secret Access Key
- kDate = HMAC("AWS4" + kSecret, Date)
- kRegion = HMAC(kDate, Region)
- kService = HMAC(kRegion, Service)
- kSigning = HMAC(kService, "aws4_request")
-*/
-func (s *V4Signer) derivedKey(t time.Time) []byte {
- h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort)))
- h = s.hmac(h, []byte(s.region.Name))
- h = s.hmac(h, []byte(s.serviceName))
- h = s.hmac(h, []byte("aws4_request"))
- return h
-}
-
-/*
-authorization method generates the authorization header value.
-*/
-func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string {
- w := new(bytes.Buffer)
- fmt.Fprint(w, "AWS4-HMAC-SHA256 ")
- fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t))
- fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header))
- fmt.Fprintf(w, "Signature=%s", signature)
- return w.String()
-}
-
-// hash method calculates the sha256 hash for a given string
-func (s *V4Signer) hash(in string) string {
- h := sha256.New()
- fmt.Fprintf(h, "%s", in)
- return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-// hmac method calculates the sha256 hmac for a given slice of bytes
-func (s *V4Signer) hmac(key, data []byte) []byte {
- h := hmac.New(sha256.New, key)
- h.Write(data)
- return h.Sum(nil)
-}
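
A sketch of signing with the V4Signer above; the keys are placeholders and the bucket URL is illustrative. Sign mutates the request in place, so any change made afterwards invalidates the signature:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/goamz/goamz/aws"
    )

    func main() {
        auth := aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "placeholder-secret"}
        signer := aws.NewV4Signer(auth, "s3", aws.USEast)

        // A nil body hashes as the empty payload.
        req, err := http.NewRequest("GET", "https://s3.amazonaws.com/mybucket/mykey", nil)
        if err != nil {
            panic(err)
        }
        req.Host = req.URL.Host // NewRequest leaves Host empty; the signer signs req.Host

        signer.Sign(req)

        // Sign adds (at least) the host, x-amz-date and Authorization headers.
        fmt.Println(req.Header.Get("x-amz-date"))
        fmt.Println(req.Header.Get("Authorization"))
    }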
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go
deleted file mode 100644
index 348ead300..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go
+++ /dev/null
@@ -1,439 +0,0 @@
-package s3
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "encoding/hex"
- "encoding/xml"
- "errors"
- "io"
- "sort"
- "strconv"
-)
-
-// Multi represents an unfinished multipart upload.
-//
-// Multipart uploads allow sending big objects in smaller chunks.
-// After all parts have been sent, the upload must be explicitly
-// completed by calling Complete with the list of parts.
-//
-// See http://goo.gl/vJfTG for an overview of multipart uploads.
-type Multi struct {
- Bucket *Bucket
- Key string
- UploadId string
-}
-
-// That's the default. Here just for testing.
-var listMultiMax = 1000
-
-type listMultiResp struct {
- NextKeyMarker string
- NextUploadIdMarker string
- IsTruncated bool
- Upload []Multi
- CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
-}
-
-// ListMulti returns the list of unfinished multipart uploads in b.
-//
-// The prefix parameter limits the response to keys that begin with the
-// specified prefix. You can use prefixes to separate a bucket into different
-// groupings of keys (to get the feeling of folders, for example).
-//
-// The delim parameter causes the response to group all of the keys that
-// share a common prefix up to the next delimiter in a single entry within
-// the CommonPrefixes field. You can use delimiters to separate a bucket
-// into different groupings of keys, similar to how folders would work.
-//
-// See http://goo.gl/ePioY for details.
-func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
- params := map[string][]string{
- "uploads": {""},
- "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
- "prefix": {prefix},
- "delimiter": {delim},
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "GET",
- bucket: b.Name,
- params: params,
- }
- var resp listMultiResp
- err := b.S3.query(req, &resp)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, nil, err
- }
- for i := range resp.Upload {
- multi := &resp.Upload[i]
- multi.Bucket = b
- multis = append(multis, multi)
- }
- prefixes = append(prefixes, resp.CommonPrefixes...)
- if !resp.IsTruncated {
- return multis, prefixes, nil
- }
- params["key-marker"] = []string{resp.NextKeyMarker}
- params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
- attempt = b.S3.AttemptStrategy.Start() // Last request worked.
- }
- panic("unreachable")
-}
-
-// Multi returns a multipart upload handler for the provided key
-// inside b. If a multipart upload exists for key, it is returned,
-// otherwise a new multipart upload is initiated with contType and perm.
-func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
- multis, _, err := b.ListMulti(key, "")
- if err != nil && !hasCode(err, "NoSuchUpload") {
- return nil, err
- }
- for _, m := range multis {
- if m.Key == key {
- return m, nil
- }
- }
- return b.InitMulti(key, contType, perm)
-}
-
-// InitMulti initializes a new multipart upload at the provided
-// key inside b and returns a value for manipulating it.
-//
-// See http://goo.gl/XP8kL for details.
-func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
- headers := map[string][]string{
- "Content-Type": {contType},
- "Content-Length": {"0"},
- "x-amz-acl": {string(perm)},
- }
- params := map[string][]string{
- "uploads": {""},
- }
- req := &request{
- method: "POST",
- bucket: b.Name,
- path: key,
- headers: headers,
- params: params,
- }
- var err error
- var resp struct {
- UploadId string `xml:"UploadId"`
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, &resp)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
-}
-
-// PutPart sends part n of the multipart upload, reading all the content from r.
-// Each part, except for the last one, must be at least 5MB in size.
-//
-// See http://goo.gl/pqZer for details.
-func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
- partSize, _, md5b64, err := seekerInfo(r)
- if err != nil {
- return Part{}, err
- }
- return m.putPart(n, r, partSize, md5b64)
-}
-
-func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(partSize, 10)},
- "Content-MD5": {md5b64},
- }
- params := map[string][]string{
- "uploadId": {m.UploadId},
- "partNumber": {strconv.FormatInt(int64(n), 10)},
- }
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- _, err := r.Seek(0, 0)
- if err != nil {
- return Part{}, err
- }
- req := &request{
- method: "PUT",
- bucket: m.Bucket.Name,
- path: m.Key,
- headers: headers,
- params: params,
- payload: r,
- }
- err = m.Bucket.S3.prepare(req)
- if err != nil {
- return Part{}, err
- }
- resp, err := m.Bucket.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return Part{}, err
- }
- etag := resp.Header.Get("ETag")
- if etag == "" {
- return Part{}, errors.New("part upload succeeded with no ETag")
- }
- return Part{n, etag, partSize}, nil
- }
- panic("unreachable")
-}
-
-func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
- _, err = r.Seek(0, 0)
- if err != nil {
- return 0, "", "", err
- }
- digest := md5.New()
- size, err = io.Copy(digest, r)
- if err != nil {
- return 0, "", "", err
- }
- sum := digest.Sum(nil)
- md5hex = hex.EncodeToString(sum)
- md5b64 = base64.StdEncoding.EncodeToString(sum)
- return size, md5hex, md5b64, nil
-}
-
-type Part struct {
- N int `xml:"PartNumber"`
- ETag string
- Size int64
-}
-
-type partSlice []Part
-
-func (s partSlice) Len() int { return len(s) }
-func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
-func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type listPartsResp struct {
- NextPartNumberMarker string
- IsTruncated bool
- Part []Part
-}
-
-// That's the default. Here just for testing.
-var listPartsMax = 1000
-
-// ListParts returns the list of previously uploaded parts in m,
-// ordered by part number.
-//
-// See http://goo.gl/ePioY for details.
-func (m *Multi) ListParts() ([]Part, error) {
- params := map[string][]string{
- "uploadId": {m.UploadId},
- "max-parts": {strconv.FormatInt(int64(listPartsMax), 10)},
- }
- var parts partSlice
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "GET",
- bucket: m.Bucket.Name,
- path: m.Key,
- params: params,
- }
- var resp listPartsResp
- err := m.Bucket.S3.query(req, &resp)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- parts = append(parts, resp.Part...)
- if !resp.IsTruncated {
- sort.Sort(parts)
- return parts, nil
- }
- params["part-number-marker"] = []string{resp.NextPartNumberMarker}
- attempt = m.Bucket.S3.AttemptStrategy.Start() // Last request worked.
- }
- panic("unreachable")
-}
-
-type ReaderAtSeeker interface {
- io.ReaderAt
- io.ReadSeeker
-}
-
-// PutAll sends all of r via a multipart upload with parts no larger
-// than partSize bytes, which must be set to at least 5MB.
-// Parts previously uploaded are either reused if their checksum
-// and size match the new part, or otherwise overwritten with the
-// new content.
-// PutAll returns all the parts of m (reused or not).
-func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
- old, err := m.ListParts()
- if err != nil && !hasCode(err, "NoSuchUpload") {
- return nil, err
- }
- reuse := 0 // Index of next old part to consider reusing.
- current := 1 // Part number of latest good part handled.
- totalSize, err := r.Seek(0, 2)
- if err != nil {
- return nil, err
- }
- first := true // Must send at least one empty part if the file is empty.
- var result []Part
-NextSection:
- for offset := int64(0); offset < totalSize || first; offset += partSize {
- first = false
- if offset+partSize > totalSize {
- partSize = totalSize - offset
- }
- section := io.NewSectionReader(r, offset, partSize)
- _, md5hex, md5b64, err := seekerInfo(section)
- if err != nil {
- return nil, err
- }
- for reuse < len(old) && old[reuse].N <= current {
- // Looks like this part was already sent.
- part := &old[reuse]
- etag := `"` + md5hex + `"`
- if part.N == current && part.Size == partSize && part.ETag == etag {
- // Checksum matches. Reuse the old part.
- result = append(result, *part)
- current++
- continue NextSection
- }
- reuse++
- }
-
- // Part wasn't found or doesn't match. Send it.
- part, err := m.putPart(current, section, partSize, md5b64)
- if err != nil {
- return nil, err
- }
- result = append(result, part)
- current++
- }
- return result, nil
-}
-
-type completeUpload struct {
- XMLName xml.Name `xml:"CompleteMultipartUpload"`
- Parts completeParts `xml:"Part"`
-}
-
-type completePart struct {
- PartNumber int
- ETag string
-}
-
-type completeParts []completePart
-
-func (p completeParts) Len() int { return len(p) }
-func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
-func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-type completeResponse struct {
- // The element name: should be either CompleteMultipartUploadResult or Error.
- XMLName xml.Name
- // If the element was Error, then it should carry the following fields:
- Code string
- Message string
- RequestId string
- HostId string
-}
-
-// Complete assembles the given previously uploaded parts into the
-// final object. This operation may take several minutes.
-//
-// The complete call to AMZ may still fail after returning HTTP 200,
-// so even though the result is otherwise unused, the body of the reply
-// must be unmarshalled and checked to see whether or not the complete
-// succeeded.
-//
-// See http://goo.gl/2Z7Tw for details.
-func (m *Multi) Complete(parts []Part) error {
- params := map[string][]string{
- "uploadId": {m.UploadId},
- }
- c := completeUpload{}
- for _, p := range parts {
- c.Parts = append(c.Parts, completePart{p.N, p.ETag})
- }
- sort.Sort(c.Parts)
- data, err := xml.Marshal(&c)
- if err != nil {
- return err
- }
-
- // Setting Content-Length prevents breakage on DreamObjects
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "POST",
- bucket: m.Bucket.Name,
- path: m.Key,
- params: params,
- payload: bytes.NewReader(data),
- headers: map[string][]string{
- "Content-Length": []string{strconv.Itoa(len(data))},
- },
- }
-
- resp := &completeResponse{}
- err := m.Bucket.S3.query(req, resp)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err == nil && resp.XMLName.Local == "Error" {
- err = &Error{
- StatusCode: 200,
- Code: resp.Code,
- Message: resp.Message,
- RequestId: resp.RequestId,
- HostId: resp.HostId,
- }
- }
- return err
- }
- panic("unreachable")
-}
-
-// Abort deletes an unfinished multipart upload and any previously
-// uploaded parts for it.
-//
-// After a multipart upload is aborted, no additional parts can be
-// uploaded using it. However, if any part uploads are currently in
-// progress, those part uploads might or might not succeed. As a result,
-// it might be necessary to abort a given multipart upload multiple
-// times in order to completely free all storage consumed by all parts.
-//
-// NOTE: If the described scenario happens to you, please report back to
-// the goamz authors with details. In the future such retrying should be
-// handled internally, but it's not clear what happens precisely (Is an
-// error returned? Is the issue completely undetectable?).
-//
-// See http://goo.gl/dnyJw for details.
-func (m *Multi) Abort() error {
- params := map[string][]string{
- "uploadId": {m.UploadId},
- }
- for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
- req := &request{
- method: "DELETE",
- bucket: m.Bucket.Name,
- path: m.Key,
- params: params,
- }
- err := m.Bucket.S3.query(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- return err
- }
- panic("unreachable")
-}
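
Tying the three calls together, a plausible end-to-end flow is: PutAll, then Complete, falling back to Abort so the storage held by a failed upload is released. A sketch with hypothetical naming and minimal error handling:

    // End-to-end multipart flow (sketch): PutAll, then Complete or Abort.
    func putLarge(m *Multi, r ReaderAtSeeker) error {
        parts, err := m.PutAll(r, 5*1024*1024)
        if err == nil {
            err = m.Complete(parts)
        }
        if err != nil {
            // Free the storage held by the partial upload; per the
            // note above, Abort may occasionally need to be repeated.
            if aerr := m.Abort(); aerr != nil {
                log.Printf("abort failed: %v", aerr)
            }
        }
        return err
    }
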
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go
deleted file mode 100644
index 9490c9b96..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go
+++ /dev/null
@@ -1,1155 +0,0 @@
-//
-// goamz - Go packages to interact with the Amazon Web Services.
-//
-// https://wiki.ubuntu.com/goamz
-//
-// Copyright (c) 2011 Canonical Ltd.
-//
-// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
-//
-
-package s3
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/md5"
- "crypto/sha1"
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "github.com/goamz/goamz/aws"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-const debug = false
-
-// The S3 type encapsulates operations with an S3 region.
-type S3 struct {
- aws.Auth
- aws.Region
-
- // ConnectTimeout is the maximum time a request attempt will
- // wait for a successful connection to be made.
- //
- // A value of zero means no timeout.
- ConnectTimeout time.Duration
-
- // ReadTimeout is the maximum time a request attempt will wait
- // for an individual read to complete.
- //
- // A value of zero means no timeout.
- ReadTimeout time.Duration
-
- // WriteTimeout is the maximum time a request attempt will
- // wait for an individual write to complete.
- //
- // A value of zero means no timeout.
- WriteTimeout time.Duration
-
- // RequestTimeout is the maximum time a request attempt can
- // take before operations return a timeout error.
- //
- // This includes connection time, any redirects, and reading
- // the response body. The timer remains running after the request
- // is made so it can interrupt reading of the response data.
- //
- // A Timeout of zero means no timeout.
- RequestTimeout time.Duration
-
- // AttemptStrategy is the attempt strategy used for requests.
- aws.AttemptStrategy
-
- // Reserve the right to use private data.
- private byte
-
- // client used for requests
- client *http.Client
-}
-
-// The Bucket type encapsulates operations with an S3 bucket.
-type Bucket struct {
- *S3
- Name string
-}
-
-// The Owner type represents the owner of the object in an S3 bucket.
-type Owner struct {
- ID string
- DisplayName string
-}
-
-// Options collects optional parameters that are folded into request headers.
-//
-type Options struct {
- SSE bool
- Meta map[string][]string
- ContentEncoding string
- CacheControl string
- RedirectLocation string
- ContentMD5 string
- // TODO: what else belongs here?
- // Content-Disposition string
- // (These become headers, so they would be []string rather than string.)
- // x-amz-storage-class []string
-}
-
-type CopyOptions struct {
- Options
- MetadataDirective string
- ContentType string
-}
-
-// CopyObjectResult is the output from a Copy request
-type CopyObjectResult struct {
- ETag string
- LastModified string
-}
-
-// DefaultAttemptStrategy is the default AttemptStrategy used by S3 objects created by New.
-var DefaultAttemptStrategy = aws.AttemptStrategy{
- Min: 5,
- Total: 5 * time.Second,
- Delay: 200 * time.Millisecond,
-}
-
-// New creates a new S3. The optional client argument allows a custom http.Client to be used.
-func New(auth aws.Auth, region aws.Region, client ...*http.Client) *S3 {
-
- var httpclient *http.Client
-
- if len(client) > 0 {
- httpclient = client[0]
- }
-
- return &S3{Auth: auth, Region: region, AttemptStrategy: DefaultAttemptStrategy, client: httpclient}
-}
-
-// Bucket returns a Bucket with the given name.
-func (s3 *S3) Bucket(name string) *Bucket {
- if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
- name = strings.ToLower(name)
- }
- return &Bucket{s3, name}
-}
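
A minimal, self-contained sketch of constructing a client and a bucket handle. The credentials are placeholders, and aws.USEast comes from the companion aws package removed in this same commit.

    package main

    import (
        "github.com/goamz/goamz/aws"
        "github.com/goamz/goamz/s3"
    )

    func main() {
        // Placeholder credentials; real code would use aws.GetAuth or similar.
        auth := aws.Auth{AccessKey: "ACCESS_KEY", SecretKey: "SECRET_KEY"}
        client := s3.New(auth, aws.USEast) // DefaultAttemptStrategy; the http.Client is built lazily on first request
        b := client.Bucket("sample-bucket")
        _ = b
    }
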
-
-var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <LocationConstraint>%s</LocationConstraint>
-</CreateBucketConfiguration>`
-
-// locationConstraint returns an io.Reader specifying a LocationConstraint if
-// required for the region.
-//
-// See http://goo.gl/bh9Kq for details.
-func (s3 *S3) locationConstraint() io.Reader {
- constraint := ""
- if s3.Region.S3LocationConstraint {
- constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
- }
- return strings.NewReader(constraint)
-}
-
-type ACL string
-
-const (
- Private = ACL("private")
- PublicRead = ACL("public-read")
- PublicReadWrite = ACL("public-read-write")
- AuthenticatedRead = ACL("authenticated-read")
- BucketOwnerRead = ACL("bucket-owner-read")
- BucketOwnerFull = ACL("bucket-owner-full-control")
-)
-
-// PutBucket creates a new bucket.
-//
-// See http://goo.gl/ndjnR for details.
-func (b *Bucket) PutBucket(perm ACL) error {
- headers := map[string][]string{
- "x-amz-acl": {string(perm)},
- }
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: "/",
- headers: headers,
- payload: b.locationConstraint(),
- }
- return b.S3.query(req, nil)
-}
-
-// DelBucket removes an existing S3 bucket. All objects in the bucket must
-// be removed before the bucket itself can be removed.
-//
-// See http://goo.gl/GoBrY for details.
-func (b *Bucket) DelBucket() (err error) {
- req := &request{
- method: "DELETE",
- bucket: b.Name,
- path: "/",
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, nil)
- if !shouldRetry(err) {
- break
- }
- }
- return err
-}
-
-// Get retrieves an object from an S3 bucket.
-//
-// See http://goo.gl/isCO7 for details.
-func (b *Bucket) Get(path string) (data []byte, err error) {
- body, err := b.GetReader(path)
- defer func() {
- if body != nil {
- body.Close()
- }
- }()
- if err != nil {
- return nil, err
- }
- data, err = ioutil.ReadAll(body)
- return data, err
-}
-
-// GetReader retrieves an object from an S3 bucket,
-// returning the body of the HTTP response.
-// It is the caller's responsibility to call Close on rc when
-// finished reading.
-func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
- resp, err := b.GetResponse(path)
- if resp != nil {
- return resp.Body, err
- }
- return nil, err
-}
-
-// GetResponse retrieves an object from an S3 bucket,
-// returning the HTTP response.
-// It is the caller's responsibility to call Close on the response
-// body when finished reading.
-func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
- return b.GetResponseWithHeaders(path, make(http.Header))
-}
-
-// GetResponseWithHeaders retrieves an object from an S3 bucket,
-// sending any custom headers given as the second parameter and
-// returning the full HTTP response.
-// It is the caller's responsibility to call Close on the response
-// body when finished reading.
-func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) {
- req := &request{
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- err = b.S3.prepare(req)
- if err != nil {
- return nil, err
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- return resp, nil
- }
- panic("unreachable")
-}
-
-// Exists checks whether or not an object exists on an S3 bucket using a HEAD request.
-func (b *Bucket) Exists(path string) (exists bool, err error) {
- req := &request{
- method: "HEAD",
- bucket: b.Name,
- path: path,
- }
- err = b.S3.prepare(req)
- if err != nil {
- return
- }
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
-
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
-
- if err != nil {
- // We can treat a 403 or 404 as non-existence.
- if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
- return false, nil
- }
- return false, err
- }
-
- if resp.StatusCode/100 == 2 {
- exists = true
- }
- return exists, err
- }
- return false, fmt.Errorf("S3 Currently Unreachable")
-}
-
-// Head HEADs an object in the S3 bucket, returning the response
-// with no body. See http://bit.ly/17K1ylI for details.
-func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
- req := &request{
- method: "HEAD",
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- err := b.S3.prepare(req)
- if err != nil {
- return nil, err
- }
-
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- return resp, err
- }
- return nil, fmt.Errorf("S3 Currently Unreachable")
-}
-
-// Put inserts an object into the S3 bucket.
-//
-// See http://goo.gl/FEBPD for details.
-func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
- body := bytes.NewBuffer(data)
- return b.PutReader(path, body, int64(len(data)), contType, perm, options)
-}
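
For example, a small round trip through Put and Get (a sketch; the helper name and key are hypothetical, and b is a *Bucket as constructed above):

    // Store a small object and read it back (sketch).
    func roundTrip(b *Bucket) error {
        if err := b.Put("greetings/hello.txt", []byte("hello, s3"),
            "text/plain", Private, Options{}); err != nil {
            return err
        }
        data, err := b.Get("greetings/hello.txt")
        if err != nil {
            return err
        }
        log.Printf("read back %d bytes", len(data))
        return nil
    }
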
-
-// PutCopy copies the object at source (given as "/bucket/key") into bucket b under the key path.
-func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) {
- headers := map[string][]string{
- "x-amz-acl": {string(perm)},
- "x-amz-copy-source": {source},
- }
- options.addHeaders(headers)
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- resp := &CopyObjectResult{}
- err := b.S3.query(req, resp)
- if err != nil {
- return resp, err
- }
- return resp, nil
-}
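
A sketch of a server-side copy. Per the x-amz-copy-source header semantics, the source is the full "/bucket/key" path; all names here are placeholders.

    // Copy an existing object into b without re-uploading its data (sketch).
    func copyObject(b *Bucket) error {
        _, err := b.PutCopy("backup/hello.txt", Private,
            CopyOptions{MetadataDirective: "COPY"},
            "/sample-bucket/greetings/hello.txt")
        return err
    }
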
-
-/*
-PutHeader - like Put, inserts an object into the S3 bucket.
-Instead of Content-Type string, pass in custom headers to override defaults.
-*/
-func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
- body := bytes.NewBuffer(data)
- return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)
-}
-
-// PutReader inserts an object into the S3 bucket by consuming data
-// from r until EOF.
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(length, 10)},
- "Content-Type": {contType},
- "x-amz-acl": {string(perm)},
- }
- options.addHeaders(headers)
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: path,
- headers: headers,
- payload: r,
- }
- return b.S3.query(req, nil)
-}
-
-/*
-PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
-Instead of Content-Type string, pass in custom headers to override defaults.
-*/
-func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {
- // Default headers
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(length, 10)},
- "Content-Type": {"application/text"},
- "x-amz-acl": {string(perm)},
- }
-
- // Override with custom headers
- for key, value := range customHeaders {
- headers[key] = value
- }
-
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: path,
- headers: headers,
- payload: r,
- }
- return b.S3.query(req, nil)
-}
-
-// addHeaders adds o's specified fields to headers
-func (o Options) addHeaders(headers map[string][]string) {
- if o.SSE {
- headers["x-amz-server-side-encryption"] = []string{"AES256"}
- }
- if len(o.ContentEncoding) != 0 {
- headers["Content-Encoding"] = []string{o.ContentEncoding}
- }
- if len(o.CacheControl) != 0 {
- headers["Cache-Control"] = []string{o.CacheControl}
- }
- if len(o.ContentMD5) != 0 {
- headers["Content-MD5"] = []string{o.ContentMD5}
- }
- if len(o.RedirectLocation) != 0 {
- headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation}
- }
- for k, v := range o.Meta {
- headers["x-amz-meta-"+k] = v
- }
-}
-
-// addHeaders adds o's specified fields to headers
-func (o CopyOptions) addHeaders(headers map[string][]string) {
- o.Options.addHeaders(headers)
- if len(o.MetadataDirective) != 0 {
- headers["x-amz-metadata-directive"] = []string{o.MetadataDirective}
- }
- if len(o.ContentType) != 0 {
- headers["Content-Type"] = []string{o.ContentType}
- }
-}
-
-func makeXmlBuffer(doc []byte) *bytes.Buffer {
- buf := new(bytes.Buffer)
- buf.WriteString(xml.Header)
- buf.Write(doc)
- return buf
-}
-
-type RoutingRule struct {
- ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"`
- RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
- RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"`
-}
-
-type WebsiteConfiguration struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"`
- IndexDocumentSuffix string `xml:"IndexDocument>Suffix"`
- ErrorDocumentKey string `xml:"ErrorDocument>Key"`
- RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
-}
-
-func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
-
- doc, err := xml.Marshal(configuration)
- if err != nil {
- return err
- }
-
- buf := makeXmlBuffer(doc)
-
- return b.PutBucketSubresource("website", buf, int64(buf.Len()))
-}
-
-func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error {
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(length, 10)},
- }
- req := &request{
- path: "/",
- method: "PUT",
- bucket: b.Name,
- headers: headers,
- payload: r,
- params: url.Values{subresource: {""}},
- }
-
- return b.S3.query(req, nil)
-}
-
-// Del removes an object from the S3 bucket.
-//
-// See http://goo.gl/APeTt for details.
-func (b *Bucket) Del(path string) error {
- req := &request{
- method: "DELETE",
- bucket: b.Name,
- path: path,
- }
- return b.S3.query(req, nil)
-}
-
-type Delete struct {
- Quiet bool `xml:"Quiet,omitempty"`
- Objects []Object `xml:"Object"`
-}
-
-type Object struct {
- Key string `xml:"Key"`
- VersionId string `xml:"VersionId,omitempty"`
-}
-
-// DelMulti removes up to 1000 objects from the S3 bucket.
-//
-// See http://goo.gl/jx6cWK for details.
-func (b *Bucket) DelMulti(objects Delete) error {
- doc, err := xml.Marshal(objects)
- if err != nil {
- return err
- }
-
- buf := makeXmlBuffer(doc)
- digest := md5.New()
- size, err := digest.Write(buf.Bytes())
- if err != nil {
- return err
- }
-
- headers := map[string][]string{
- "Content-Length": {strconv.FormatInt(int64(size), 10)},
- "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
- "Content-Type": {"text/xml"},
- }
- req := &request{
- path: "/",
- method: "POST",
- params: url.Values{"delete": {""}},
- bucket: b.Name,
- headers: headers,
- payload: buf,
- }
-
- return b.S3.query(req, nil)
-}
-
-// The ListResp type holds the results of a List bucket operation.
-type ListResp struct {
- Name string
- Prefix string
- Delimiter string
- Marker string
- NextMarker string
- MaxKeys int
-
- // IsTruncated is true if the results have been truncated because
- // there are more keys and prefixes than can fit in MaxKeys.
- // N.B. this is the opposite sense to that documented (incorrectly) in
- // http://goo.gl/YjQTc
- IsTruncated bool
- Contents []Key
- CommonPrefixes []string `xml:">Prefix"`
-}
-
-// The Key type represents an item stored in an S3 bucket.
-type Key struct {
- Key string
- LastModified string
- Size int64
- // ETag gives the hex-encoded MD5 sum of the contents,
- // surrounded with double-quotes.
- ETag string
- StorageClass string
- Owner Owner
-}
-
-// List returns information about objects in an S3 bucket.
-//
-// The prefix parameter limits the response to keys that begin with the
-// specified prefix.
-//
-// The delim parameter causes the response to group all of the keys that
-// share a common prefix up to the next delimiter in a single entry within
-// the CommonPrefixes field. You can use delimiters to separate a bucket
-// into different groupings of keys, similar to how folders would work.
-//
-// The marker parameter specifies the key to start with when listing objects
-// in a bucket. Amazon S3 lists objects in alphabetical order and
-// will return keys alphabetically greater than the marker.
-//
-// The max parameter specifies how many keys + common prefixes to return in
-// the response. The default is 1000.
-//
-// For example, given these keys in a bucket:
-//
-// index.html
-// index2.html
-// photos/2006/January/sample.jpg
-// photos/2006/February/sample2.jpg
-// photos/2006/February/sample3.jpg
-// photos/2006/February/sample4.jpg
-//
-// Listing this bucket with delimiter set to "/" would yield the
-// following result:
-//
-// &ListResp{
-// Name: "sample-bucket",
-// MaxKeys: 1000,
-// Delimiter: "/",
-// Contents: []Key{
-// {Key: "index.html"},
-// {Key: "index2.html"},
-// },
-// CommonPrefixes: []string{
-// "photos/",
-// },
-// }
-//
-// Listing the same bucket with delimiter set to "/" and prefix set to
-// "photos/2006/" would yield the following result:
-//
-// &ListResp{
-// Name: "sample-bucket",
-// MaxKeys: 1000,
-// Delimiter: "/",
-// Prefix: "photos/2006/",
-// CommonPrefixes: []string{
-// "photos/2006/February/",
-// "photos/2006/January/",
-// },
-// }
-//
-// See http://goo.gl/YjQTc for details.
-func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
- params := map[string][]string{
- "prefix": {prefix},
- "delimiter": {delim},
- "marker": {marker},
- }
- if max != 0 {
- params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
- }
- req := &request{
- bucket: b.Name,
- params: params,
- }
- result = &ListResp{}
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, result)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return result, nil
-}
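
Because IsTruncated reports whether more results remain, a complete listing pages through markers. A sketch (hypothetical helper; note that S3 only returns NextMarker when a delimiter is set, so this advances from the last key instead):

    // Page through every key under a prefix (sketch).
    func allKeys(b *Bucket, prefix string) ([]Key, error) {
        var keys []Key
        marker := ""
        for {
            resp, err := b.List(prefix, "", marker, 1000)
            if err != nil {
                return nil, err
            }
            keys = append(keys, resp.Contents...)
            if !resp.IsTruncated || len(resp.Contents) == 0 {
                return keys, nil
            }
            // No delimiter, so NextMarker may be empty; resume from the last key.
            marker = resp.Contents[len(resp.Contents)-1].Key
        }
    }
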
-
-// The VersionsResp type holds the results of a list bucket Versions operation.
-type VersionsResp struct {
- Name string
- Prefix string
- KeyMarker string
- VersionIdMarker string
- MaxKeys int
- Delimiter string
- IsTruncated bool
- Versions []Version
- CommonPrefixes []string `xml:">Prefix"`
-}
-
-// The Version type represents an object version stored in an S3 bucket.
-type Version struct {
- Key string
- VersionId string
- IsLatest bool
- LastModified string
- // ETag gives the hex-encoded MD5 sum of the contents,
- // surrounded with double-quotes.
- ETag string
- Size int64
- Owner Owner
- StorageClass string
-}
-
-func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
- params := map[string][]string{
- "versions": {""},
- "prefix": {prefix},
- "delimiter": {delim},
- }
-
- if len(versionIdMarker) != 0 {
- params["version-id-marker"] = []string{versionIdMarker}
- }
- if len(keyMarker) != 0 {
- params["key-marker"] = []string{keyMarker}
- }
-
- if max != 0 {
- params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
- }
- req := &request{
- bucket: b.Name,
- params: params,
- }
- result = &VersionsResp{}
- for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
- err = b.S3.query(req, result)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return result, nil
-}
-
-// GetBucketContents returns a mapping of all key names in this bucket to Key objects.
-func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
- bucket_contents := map[string]Key{}
- prefix := ""
- path_separator := ""
- marker := ""
- for {
- contents, err := b.List(prefix, path_separator, marker, 1000)
- if err != nil {
- return &bucket_contents, err
- }
- for _, key := range contents.Contents {
- bucket_contents[key.Key] = key
- }
- if contents.IsTruncated {
- marker = contents.NextMarker
- } else {
- break
- }
- }
-
- return &bucket_contents, nil
-}
-
-// URL returns a non-signed URL that allows retrieving the
-// object at path. It only works if the object is publicly
-// readable (see SignedURL).
-func (b *Bucket) URL(path string) string {
- req := &request{
- bucket: b.Name,
- path: path,
- }
- err := b.S3.prepare(req)
- if err != nil {
- panic(err)
- }
- u, err := req.url()
- if err != nil {
- panic(err)
- }
- u.RawQuery = ""
- return u.String()
-}
-
-// SignedURL returns a signed URL that allows anyone holding the URL
-// to retrieve the object at path. The signature is valid until expires.
-func (b *Bucket) SignedURL(path string, expires time.Time) string {
- req := &request{
- bucket: b.Name,
- path: path,
- params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}},
- }
- err := b.S3.prepare(req)
- if err != nil {
- panic(err)
- }
- u, err := req.url()
- if err != nil {
- panic(err)
- }
- if b.S3.Auth.Token() != "" {
- return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
- } else {
- return u.String()
- }
-}
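
For instance, a link that stays valid for one hour (sketch; the key name is a placeholder):

    // Generate a pre-signed GET URL valid for one hour (sketch).
    func shareLink(b *Bucket) string {
        return b.SignedURL("greetings/hello.txt", time.Now().Add(time.Hour))
    }
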
-
-// UploadSignedURL returns a signed URL that allows anyone holding the URL
-// to upload the object at path. The signature is valid until expires.
-// content_type is a string like "image/png".
-// path is the resource name in S3 terminology, like images/ali.png
-// (excluding the bucket name itself).
-func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string {
- expire_date := expires.Unix()
- if method != "POST" {
- method = "PUT"
- }
- stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n/" + b.Name + "/" + path
- fmt.Println("String to sign:\n", stringToSign)
- a := b.S3.Auth
- secretKey := a.SecretKey
- accessId := a.AccessKey
- mac := hmac.New(sha1.New, []byte(secretKey))
- mac.Write([]byte(stringToSign))
- macsum := mac.Sum(nil)
- signature := base64.StdEncoding.EncodeToString(macsum)
- signature = strings.TrimSpace(signature)
-
- signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/")
- if err != nil {
- log.Println("ERROR sining url for S3 upload", err)
- return ""
- }
- signedurl.Path += path
- params := url.Values{}
- params.Add("AWSAccessKeyId", accessId)
- params.Add("Expires", strconv.FormatInt(expire_date, 10))
- params.Add("Signature", signature)
- if a.Token() != "" {
- params.Add("token", a.Token())
- }
-
- signedurl.RawQuery = params.Encode()
- return signedurl.String()
-}
-
-// PostFormArgs returns the action and input fields needed to allow anonymous
-// uploads to a bucket within the expiration limit.
-func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
- conditions := make([]string, 0)
- fields = map[string]string{
- "AWSAccessKeyId": b.Auth.AccessKey,
- "key": path,
- }
-
- conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
- conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
- if redirect != "" {
- conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
- fields["success_action_redirect"] = redirect
- }
-
- vExpiration := expires.UTC().Format("2006-01-02T15:04:05Z") // the policy requires a UTC timestamp
- vConditions := strings.Join(conditions, ",")
- policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
- policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
- fields["policy"] = policy64
-
- signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey))
- signer.Write([]byte(policy64))
- fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
-
- action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name)
- return
-}
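
A sketch of feeding the returned action and fields into a browser upload form. The HTML is illustrative only; in a POST policy upload the file input must follow the hidden policy fields.

    // Render an anonymous-upload form for a browser (sketch).
    func uploadFormHTML(b *Bucket) string {
        action, fields := b.PostFormArgs("uploads/photo.jpg",
            time.Now().Add(10*time.Minute), "https://example.com/done")
        var buf bytes.Buffer
        fmt.Fprintf(&buf, "<form action=%q method=\"post\" enctype=\"multipart/form-data\">\n", action)
        for k, v := range fields {
            fmt.Fprintf(&buf, "  <input type=\"hidden\" name=%q value=%q>\n", k, v)
        }
        buf.WriteString("  <input type=\"file\" name=\"file\"><input type=\"submit\">\n</form>")
        return buf.String()
    }
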
-
-type request struct {
- method string
- bucket string
- path string
- signpath string
- params url.Values
- headers http.Header
- baseurl string
- payload io.Reader
- prepared bool
-}
-
-func (req *request) url() (*url.URL, error) {
- u, err := url.Parse(req.baseurl)
- if err != nil {
- return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
- }
- u.RawQuery = req.params.Encode()
- u.Path = req.path
- return u, nil
-}
-
-// query prepares and runs the req request.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) query(req *request, resp interface{}) error {
- err := s3.prepare(req)
- if err == nil {
- var httpResponse *http.Response
- httpResponse, err = s3.run(req, resp)
- if resp == nil && httpResponse != nil {
- httpResponse.Body.Close()
- }
- }
- return err
-}
-
-// prepare sets up req to be delivered to S3.
-func (s3 *S3) prepare(req *request) error {
- var signpath = req.path
-
- if !req.prepared {
- req.prepared = true
- if req.method == "" {
- req.method = "GET"
- }
- // Copy so they can be mutated without affecting retries.
- params := make(url.Values)
- headers := make(http.Header)
- for k, v := range req.params {
- params[k] = v
- }
- for k, v := range req.headers {
- headers[k] = v
- }
- req.params = params
- req.headers = headers
- if !strings.HasPrefix(req.path, "/") {
- req.path = "/" + req.path
- }
- signpath = req.path
- if req.bucket != "" {
- req.baseurl = s3.Region.S3BucketEndpoint
- if req.baseurl == "" {
- // Use the path method to address the bucket.
- req.baseurl = s3.Region.S3Endpoint
- req.path = "/" + req.bucket + req.path
- } else {
- // Just in case, prevent injection.
- if strings.IndexAny(req.bucket, "/:@") >= 0 {
- return fmt.Errorf("bad S3 bucket: %q", req.bucket)
- }
- req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
- }
- signpath = "/" + req.bucket + signpath
- }
- }
-
- // Always sign again, as it's not clear how far the
- // server got in handling a previous attempt.
- u, err := url.Parse(req.baseurl)
- if err != nil {
- return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
- }
- // Re-encode the sign path so spaces and other special characters
- // match the escaping used in the request URL.
- reqSignpathSpaceFix := (&url.URL{Path: signpath}).String()
- req.headers["Host"] = []string{u.Host}
- req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
- if s3.Auth.Token() != "" {
- req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
- }
- sign(s3.Auth, req.method, reqSignpathSpaceFix, req.params, req.headers)
- return nil
-}
-
-// run sends req and returns the http response from the server.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
- if debug {
- log.Printf("Running S3 request: %#v", req)
- }
-
- u, err := req.url()
- if err != nil {
- return nil, err
- }
-
- hreq := http.Request{
- URL: u,
- Method: req.method,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Close: true,
- Header: req.headers,
- }
-
- if v, ok := req.headers["Content-Length"]; ok {
- hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
- delete(req.headers, "Content-Length")
- }
- if req.payload != nil {
- hreq.Body = ioutil.NopCloser(req.payload)
- }
-
- if s3.client == nil {
- s3.client = &http.Client{
- Transport: &http.Transport{
- Dial: func(netw, addr string) (c net.Conn, err error) {
- c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
- if err != nil {
- return
- }
-
- var deadline time.Time
- if s3.RequestTimeout > 0 {
- deadline = time.Now().Add(s3.RequestTimeout)
- c.SetDeadline(deadline)
- }
-
- if s3.ReadTimeout > 0 || s3.WriteTimeout > 0 {
- c = &ioTimeoutConn{
- TCPConn: c.(*net.TCPConn),
- readTimeout: s3.ReadTimeout,
- writeTimeout: s3.WriteTimeout,
- requestDeadline: deadline,
- }
- }
- return
- },
- },
- }
- }
-
- hresp, err := s3.client.Do(&hreq)
- if err != nil {
- return nil, err
- }
- if debug {
- dump, _ := httputil.DumpResponse(hresp, true)
- log.Printf("} -> %s\n", dump)
- }
- if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
- defer hresp.Body.Close()
- return nil, buildError(hresp)
- }
- if resp != nil {
- err = xml.NewDecoder(hresp.Body).Decode(resp)
- hresp.Body.Close()
- if debug {
- log.Printf("goamz.s3> decoded xml into %#v", resp)
- }
- }
- return hresp, err
-}
-
-// Error represents an error in an operation with S3.
-type Error struct {
- StatusCode int // HTTP status code (200, 403, ...)
- Code string // S3 error code ("UnsupportedOperation", ...)
- Message string // The human-oriented error message
- BucketName string
- RequestId string
- HostId string
-}
-
-func (e *Error) Error() string {
- return e.Message
-}
-
-func buildError(r *http.Response) error {
- if debug {
- log.Printf("got error (status code %v)", r.StatusCode)
- data, err := ioutil.ReadAll(r.Body)
- if err != nil {
- log.Printf("\tread error: %v", err)
- } else {
- log.Printf("\tdata:\n%s\n\n", data)
- }
- r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
- }
-
- err := Error{}
- // TODO return error if Unmarshal fails?
- xml.NewDecoder(r.Body).Decode(&err)
- r.Body.Close()
- err.StatusCode = r.StatusCode
- if err.Message == "" {
- err.Message = r.Status
- }
- if debug {
- log.Printf("err: %#v\n", err)
- }
- return &err
-}
-
-func shouldRetry(err error) bool {
- if err == nil {
- return false
- }
- if e, ok := err.(*url.Error); ok {
- // Transport returns this string if it detects a write on a connection which
- // has already had an error
- if e.Err.Error() == "http: can't write HTTP request on broken connection" {
- return true
- }
- err = e.Err
- }
-
- switch err {
- case io.ErrUnexpectedEOF, io.EOF:
- return true
- }
- switch e := err.(type) {
- case *net.DNSError:
- return true
- case *net.OpError:
- switch e.Op {
- case "read", "write", "WSARecv", "WSASend", "ConnectEx":
- return true
- }
- case *Error:
- switch e.Code {
- case "InternalError", "NoSuchUpload", "NoSuchBucket", "RequestTimeout":
- return true
- }
- // Handle TLS handshake timeouts and similar temporary errors.
- case net.Error:
- return e.Temporary()
- }
-
- return false
-}
-
-func hasCode(err error, code string) bool {
- s3err, ok := err.(*Error)
- return ok && s3err.Code == code
-}
-
-// ioTimeoutConn is a net.Conn which sets a deadline for each Read or Write operation.
-type ioTimeoutConn struct {
- *net.TCPConn
- readTimeout time.Duration
- writeTimeout time.Duration
- requestDeadline time.Time
-}
-
-func (c *ioTimeoutConn) deadline(timeout time.Duration) time.Time {
- dl := time.Now().Add(timeout)
- if c.requestDeadline.IsZero() || dl.Before(c.requestDeadline) {
- return dl
- }
-
- return c.requestDeadline
-}
-
-func (c *ioTimeoutConn) Read(b []byte) (int, error) {
- if c.readTimeout > 0 {
- err := c.TCPConn.SetReadDeadline(c.deadline(c.readTimeout))
- if err != nil {
- return 0, err
- }
- }
- return c.TCPConn.Read(b)
-}
-
-func (c *ioTimeoutConn) Write(b []byte) (int, error) {
- if c.writeTimeout > 0 {
- err := c.TCPConn.SetWriteDeadline(c.deadline(c.writeTimeout))
- if err != nil {
- return 0, err
- }
- }
- return c.TCPConn.Write(b)
-}
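
Taken together, ConnectTimeout bounds dialing, Read/WriteTimeout bound each I/O operation through ioTimeoutConn, and RequestTimeout caps the whole exchange. A configuration sketch (hypothetical helper; the fields must be set before the first request, because run caches the http.Client it builds):

    // Tune the per-phase timeouts before issuing any request (sketch).
    func newTunedS3(auth aws.Auth) *S3 {
        s := New(auth, aws.USEast)
        s.ConnectTimeout = 5 * time.Second // dialing
        s.ReadTimeout = 30 * time.Second   // each Read, via ioTimeoutConn
        s.WriteTimeout = 30 * time.Second  // each Write, via ioTimeoutConn
        s.RequestTimeout = 2 * time.Minute // whole-request deadline
        return s
    }
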
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go
deleted file mode 100644
index 2016b5659..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go
+++ /dev/null
@@ -1,633 +0,0 @@
-package s3test
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "encoding/hex"
- "encoding/xml"
- "fmt"
- "github.com/goamz/goamz/s3"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-const debug = false
-
-type s3Error struct {
- statusCode int
- XMLName struct{} `xml:"Error"`
- Code string
- Message string
- BucketName string
- RequestId string
- HostId string
-}
-
-type action struct {
- srv *Server
- w http.ResponseWriter
- req *http.Request
- reqId string
-}
-
-// Config controls the internal behaviour of the Server. A nil config is the default
-// and behaves as if all configuration options assume their default values. Once passed
-// to NewServer, the configuration must not be modified.
-type Config struct {
- // Send409Conflict controls how the Server will respond to calls to PUT on a
- // previously existing bucket. The default is false, and corresponds to the
-// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
- // all other regions.
- // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
- Send409Conflict bool
-}
-
-func (c *Config) send409Conflict() bool {
- if c != nil {
- return c.Send409Conflict
- }
- return false
-}
-
-// Server is a fake S3 server for testing purposes.
-// All of the data for the server is kept in memory.
-type Server struct {
- url string
- reqId int
- listener net.Listener
- mu sync.Mutex
- buckets map[string]*bucket
- config *Config
-}
-
-type bucket struct {
- name string
- acl s3.ACL
- ctime time.Time
- objects map[string]*object
-}
-
-type object struct {
- name string
- mtime time.Time
- meta http.Header // metadata to return with requests.
- checksum []byte // also held as Content-MD5 in meta.
- data []byte
-}
-
-// A resource encapsulates the subject of an HTTP request.
-// The resource referred to may or may not exist
-// when the request is made.
-type resource interface {
- put(a *action) interface{}
- get(a *action) interface{}
- post(a *action) interface{}
- delete(a *action) interface{}
-}
-
-func NewServer(config *Config) (*Server, error) {
- l, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- return nil, fmt.Errorf("cannot listen on localhost: %v", err)
- }
- srv := &Server{
- listener: l,
- url: "http://" + l.Addr().String(),
- buckets: make(map[string]*bucket),
- config: config,
- }
- go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- srv.serveHTTP(w, req)
- }))
- return srv, nil
-}
-
-// Quit closes down the server.
-func (srv *Server) Quit() {
- srv.listener.Close()
-}
-
-// URL returns a URL for the server.
-func (srv *Server) URL() string {
- return srv.url
-}
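
A sketch of using the fake server from a test (testing, aws, s3, and s3test imports assumed). The Region literal is hand-built because the endpoint is only known at runtime; S3LocationConstraint is set because this server rejects an empty location-constraint body (see the bucket PUT handler below).

    // Point an s3.Bucket at the in-memory server for a test (sketch).
    func newTestBucket(t *testing.T) *s3.Bucket {
        srv, err := s3test.NewServer(nil) // nil Config: default behaviour
        if err != nil {
            t.Fatal(err)
        }
        region := aws.Region{
            Name:                 "faux-region-1",
            S3Endpoint:           srv.URL(),
            S3LocationConstraint: true, // send a location constraint body on PutBucket
        }
        b := s3.New(aws.Auth{AccessKey: "abc", SecretKey: "123"}, region).Bucket("testbucket")
        if err := b.PutBucket(s3.Private); err != nil {
            t.Fatal(err)
        }
        return b
    }
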
-
-func fatalf(code int, codeStr string, errf string, a ...interface{}) {
- panic(&s3Error{
- statusCode: code,
- Code: codeStr,
- Message: fmt.Sprintf(errf, a...),
- })
-}
-
-// serveHTTP serves the S3 protocol.
-func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
- // ignore error from ParseForm as it's usually spurious.
- req.ParseForm()
-
- srv.mu.Lock()
- defer srv.mu.Unlock()
-
- if debug {
- log.Printf("s3test %q %q", req.Method, req.URL)
- }
- a := &action{
- srv: srv,
- w: w,
- req: req,
- reqId: fmt.Sprintf("%09X", srv.reqId),
- }
- srv.reqId++
-
- var r resource
- defer func() {
- switch err := recover().(type) {
- case *s3Error:
- switch r := r.(type) {
- case objectResource:
- err.BucketName = r.bucket.name
- case bucketResource:
- err.BucketName = r.name
- }
- err.RequestId = a.reqId
- // TODO HostId
- w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`)
- w.WriteHeader(err.statusCode)
- xmlMarshal(w, err)
- case nil:
- default:
- panic(err)
- }
- }()
-
- r = srv.resourceForURL(req.URL)
-
- var resp interface{}
- switch req.Method {
- case "PUT":
- resp = r.put(a)
- case "GET", "HEAD":
- resp = r.get(a)
- case "DELETE":
- resp = r.delete(a)
- case "POST":
- resp = r.post(a)
- default:
- fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
- }
- if resp != nil && req.Method != "HEAD" {
- xmlMarshal(w, resp)
- }
-}
-
-// xmlMarshal is the same as xml.Marshal except that
-// it panics on error. The marshalling should not fail,
-// but we want to know if it does.
-func xmlMarshal(w io.Writer, x interface{}) {
- if err := xml.NewEncoder(w).Encode(x); err != nil {
- panic(fmt.Errorf("error marshalling %#v: %v", x, err))
- }
-}
-
-// In a fully implemented test server, each of these would have
-// its own resource type.
-var unimplementedBucketResourceNames = map[string]bool{
- "acl": true,
- "lifecycle": true,
- "policy": true,
- "location": true,
- "logging": true,
- "notification": true,
- "versions": true,
- "requestPayment": true,
- "versioning": true,
- "website": true,
- "uploads": true,
-}
-
-var unimplementedObjectResourceNames = map[string]bool{
- "uploadId": true,
- "acl": true,
- "torrent": true,
- "uploads": true,
-}
-
-var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
-
-// resourceForURL returns a resource object for the given URL.
-func (srv *Server) resourceForURL(u *url.URL) (r resource) {
- m := pathRegexp.FindStringSubmatch(u.Path)
- if m == nil {
- fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
- }
- bucketName := m[2]
- objectName := m[4]
- if bucketName == "" {
- return nullResource{} // root
- }
- b := bucketResource{
- name: bucketName,
- bucket: srv.buckets[bucketName],
- }
- q := u.Query()
- if objectName == "" {
- for name := range q {
- if unimplementedBucketResourceNames[name] {
- return nullResource{}
- }
- }
- return b
- }
- if b.bucket == nil {
- fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
- }
- objr := objectResource{
- name: objectName,
- version: q.Get("versionId"),
- bucket: b.bucket,
- }
- for name := range q {
- if unimplementedObjectResourceNames[name] {
- return nullResource{}
- }
- }
- if obj := objr.bucket.objects[objr.name]; obj != nil {
- objr.object = obj
- }
- return objr
-}
-
-// nullResource has error stubs for all resource methods.
-type nullResource struct{}
-
-func notAllowed() interface{} {
- fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
- return nil
-}
-
-func (nullResource) put(a *action) interface{} { return notAllowed() }
-func (nullResource) get(a *action) interface{} { return notAllowed() }
-func (nullResource) post(a *action) interface{} { return notAllowed() }
-func (nullResource) delete(a *action) interface{} { return notAllowed() }
-
-const timeFormat = "2006-01-02T15:04:05.000Z07:00"
-
-type bucketResource struct {
- name string
- bucket *bucket // non-nil if the bucket already exists.
-}
-
-// GET on a bucket lists the objects in the bucket.
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
-func (r bucketResource) get(a *action) interface{} {
- if r.bucket == nil {
- fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
- }
- delimiter := a.req.Form.Get("delimiter")
- marker := a.req.Form.Get("marker")
- maxKeys := -1
- if s := a.req.Form.Get("max-keys"); s != "" {
- i, err := strconv.Atoi(s)
- if err != nil || i < 0 {
- fatalf(400, "invalid value for max-keys: %q", s)
- }
- maxKeys = i
- }
- prefix := a.req.Form.Get("prefix")
- a.w.Header().Set("Content-Type", "application/xml")
-
- if a.req.Method == "HEAD" {
- return nil
- }
-
- var objs orderedObjects
-
- // first get all matching objects and arrange them in alphabetical order.
- for name, obj := range r.bucket.objects {
- if strings.HasPrefix(name, prefix) {
- objs = append(objs, obj)
- }
- }
- sort.Sort(objs)
-
- if maxKeys <= 0 {
- maxKeys = 1000
- }
- resp := &s3.ListResp{
- Name: r.bucket.name,
- Prefix: prefix,
- Delimiter: delimiter,
- Marker: marker,
- MaxKeys: maxKeys,
- }
-
- var prefixes []string
- for _, obj := range objs {
- if !strings.HasPrefix(obj.name, prefix) {
- continue
- }
- name := obj.name
- isPrefix := false
- if delimiter != "" {
- if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
- name = obj.name[:len(prefix)+i+len(delimiter)]
- if prefixes != nil && prefixes[len(prefixes)-1] == name {
- continue
- }
- isPrefix = true
- }
- }
- if name <= marker {
- continue
- }
- if len(resp.Contents)+len(prefixes) >= maxKeys {
- resp.IsTruncated = true
- break
- }
- if isPrefix {
- prefixes = append(prefixes, name)
- } else {
- // Contents contains only keys not found in CommonPrefixes
- resp.Contents = append(resp.Contents, obj.s3Key())
- }
- }
- resp.CommonPrefixes = prefixes
- return resp
-}
-
-// orderedObjects holds a slice of objects that can be sorted
-// by name.
-type orderedObjects []*object
-
-func (s orderedObjects) Len() int {
- return len(s)
-}
-func (s orderedObjects) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s orderedObjects) Less(i, j int) bool {
- return s[i].name < s[j].name
-}
-
-func (obj *object) s3Key() s3.Key {
- return s3.Key{
- Key: obj.name,
- LastModified: obj.mtime.Format(timeFormat),
- Size: int64(len(obj.data)),
- ETag: fmt.Sprintf(`"%x"`, obj.checksum),
- // TODO StorageClass
- // TODO Owner
- }
-}
-
-// DELETE on a bucket deletes the bucket if it's not empty.
-func (r bucketResource) delete(a *action) interface{} {
- b := r.bucket
- if b == nil {
- fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
- }
- if len(b.objects) > 0 {
- fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
- }
- delete(a.srv.buckets, b.name)
- return nil
-}
-
-// PUT on a bucket creates the bucket.
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
-func (r bucketResource) put(a *action) interface{} {
- var created bool
- if r.bucket == nil {
- if !validBucketName(r.name) {
- fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
- }
- if loc := locationConstraint(a); loc == "" {
- fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
- }
- // TODO validate acl
- r.bucket = &bucket{
- name: r.name,
- // TODO default acl
- objects: make(map[string]*object),
- }
- a.srv.buckets[r.name] = r.bucket
- created = true
- }
- if !created && a.srv.config.send409Conflict() {
- fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
- }
- r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
- return nil
-}
-
-func (bucketResource) post(a *action) interface{} {
- fatalf(400, "Method", "bucket POST method not available")
- return nil
-}
-
-// validBucketName returns whether name is a valid bucket name.
-// Here are the rules, from:
-// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
-//
-// Can contain lowercase letters, numbers, periods (.), underscores (_),
-// and dashes (-). You can use uppercase letters for buckets only in the
-// US Standard region.
-//
-// Must start with a number or letter
-//
-// Must be between 3 and 255 characters long
-//
-// There's one extra rule (must not be formatted as an IP address,
-// e.g. 192.168.5.4), but the real S3 server does not seem to check
-// that rule, so we will not check it either.
-//
-func validBucketName(name string) bool {
- if len(name) < 3 || len(name) > 255 {
- return false
- }
- r := name[0]
- if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
- return false
- }
- for _, r := range name {
- switch {
- case r >= '0' && r <= '9':
- case r >= 'a' && r <= 'z':
- case r == '_' || r == '-':
- case r == '.':
- default:
- return false
- }
- }
- return true
-}
-
-var responseParams = map[string]bool{
- "content-type": true,
- "content-language": true,
- "expires": true,
- "cache-control": true,
- "content-disposition": true,
- "content-encoding": true,
-}
-
-type objectResource struct {
- name string
- version string
- bucket *bucket // always non-nil.
- object *object // may be nil.
-}
-
-// GET on an object gets the contents of the object.
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
-func (objr objectResource) get(a *action) interface{} {
- obj := objr.object
- if obj == nil {
- fatalf(404, "NoSuchKey", "The specified key does not exist.")
- }
- h := a.w.Header()
- // add metadata
- for name, d := range obj.meta {
- h[name] = d
- }
- // override header values in response to request parameters.
- for name, vals := range a.req.Form {
- if strings.HasPrefix(name, "response-") {
- name = name[len("response-"):]
- if !responseParams[name] {
- continue
- }
- h.Set(name, vals[0])
- }
- }
- if r := a.req.Header.Get("Range"); r != "" {
- fatalf(400, "NotImplemented", "range unimplemented")
- }
- // TODO Last-Modified-Since
- // TODO If-Modified-Since
- // TODO If-Unmodified-Since
- // TODO If-Match
- // TODO If-None-Match
- // TODO Connection: close ??
- // TODO x-amz-request-id
- h.Set("Content-Length", fmt.Sprint(len(obj.data)))
- h.Set("ETag", hex.EncodeToString(obj.checksum))
- h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
- if a.req.Method == "HEAD" {
- return nil
- }
- // TODO avoid holding the lock when writing data.
- _, err := a.w.Write(obj.data)
- if err != nil {
- // we can't do much except just log the fact.
- log.Printf("error writing data: %v", err)
- }
- return nil
-}
-
-var metaHeaders = map[string]bool{
- "Content-MD5": true,
- "x-amz-acl": true,
- "Content-Type": true,
- "Content-Encoding": true,
- "Content-Disposition": true,
-}
-
-// PUT on an object creates the object.
-func (objr objectResource) put(a *action) interface{} {
- // TODO Cache-Control header
- // TODO Expires header
- // TODO x-amz-server-side-encryption
- // TODO x-amz-storage-class
-
- // TODO is this correct, or should we erase all previous metadata?
- obj := objr.object
- if obj == nil {
- obj = &object{
- name: objr.name,
- meta: make(http.Header),
- }
- }
-
- var expectHash []byte
- if c := a.req.Header.Get("Content-MD5"); c != "" {
- var err error
- expectHash, err = base64.StdEncoding.DecodeString(c)
- if err != nil || len(expectHash) != md5.Size {
- fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
- }
- }
- sum := md5.New()
- // TODO avoid holding lock while reading data.
- data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
- if err != nil {
- fatalf(400, "TODO", "read error")
- }
- gotHash := sum.Sum(nil)
- if expectHash != nil && !bytes.Equal(gotHash, expectHash) {
- fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
- }
- if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
- fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
- }
-
- // PUT request has been successful - save data and metadata
- for key, values := range a.req.Header {
- key = http.CanonicalHeaderKey(key)
- if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
- obj.meta[key] = values
- }
- }
- obj.data = data
- obj.checksum = gotHash
- obj.mtime = time.Now()
- objr.bucket.objects[objr.name] = obj
-
- h := a.w.Header()
- h.Set("ETag", fmt.Sprintf(`"%s"`, hex.EncodeToString(obj.checksum)))
-
- return nil
-}
-
-func (objr objectResource) delete(a *action) interface{} {
- delete(objr.bucket.objects, objr.name)
- return nil
-}
-
-func (objr objectResource) post(a *action) interface{} {
- fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
- return nil
-}
-
-type CreateBucketConfiguration struct {
- LocationConstraint string
-}
-
-// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
-// If there is no body, an empty string will be returned.
-func locationConstraint(a *action) string {
- var body bytes.Buffer
- if _, err := io.Copy(&body, a.req.Body); err != nil {
- fatalf(400, "InvalidRequest", err.Error())
- }
- if body.Len() == 0 {
- return ""
- }
- var loc CreateBucketConfiguration
- if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
- fatalf(400, "InvalidRequest", err.Error())
- }
- return loc.LocationConstraint
-}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go
deleted file mode 100644
index 722d97d29..000000000
--- a/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package s3
-
-import (
- "crypto/hmac"
- "crypto/sha1"
- "encoding/base64"
- "github.com/goamz/goamz/aws"
- "log"
- "sort"
- "strings"
-)
-
-var b64 = base64.StdEncoding
-
-// ----------------------------------------------------------------------------
-// S3 signing (http://goo.gl/G1LrK)
-
-var s3ParamsToSign = map[string]bool{
- "acl": true,
- "location": true,
- "logging": true,
- "notification": true,
- "partNumber": true,
- "policy": true,
- "requestPayment": true,
- "torrent": true,
- "uploadId": true,
- "uploads": true,
- "versionId": true,
- "versioning": true,
- "versions": true,
- "response-content-type": true,
- "response-content-language": true,
- "response-expires": true,
- "response-cache-control": true,
- "response-content-disposition": true,
- "response-content-encoding": true,
- "website": true,
- "delete": true,
-}
-
-type keySortableTupleList []keySortableTuple
-
-type keySortableTuple struct {
- Key string
- TupleString string
-}
-
-func (l keySortableTupleList) StringSlice() []string {
- slice := make([]string, len(l))
- for i, v := range l {
- slice[i] = v.TupleString
- }
- return slice
-}
-
-func (l keySortableTupleList) Len() int {
- return len(l)
-}
-
-func (l keySortableTupleList) Less(i, j int) bool {
- return l[i].Key < l[j].Key
-}
-
-func (l keySortableTupleList) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
- var md5, ctype, date, xamz string
- var xamzDate bool
- var sarray keySortableTupleList
- for k, v := range headers {
- k = strings.ToLower(k)
- switch k {
- case "content-md5":
- md5 = v[0]
- case "content-type":
- ctype = v[0]
- case "date":
- if !xamzDate {
- date = v[0]
- }
- default:
- if strings.HasPrefix(k, "x-amz-") {
- vall := strings.Join(v, ",")
- sarray = append(sarray, keySortableTuple{k, k + ":" + vall})
- if k == "x-amz-date" {
- xamzDate = true
- date = ""
- }
- }
- }
- }
- if len(sarray) > 0 {
- sort.Sort(sarray)
- xamz = strings.Join(sarray.StringSlice(), "\n") + "\n"
- }
-
- expires := false
- if v, ok := params["Expires"]; ok {
- // Query string request authentication alternative.
- expires = true
- date = v[0]
- params["AWSAccessKeyId"] = []string{auth.AccessKey}
- }
-
- sarray = sarray[0:0]
- for k, v := range params {
- if s3ParamsToSign[k] {
- for _, vi := range v {
- if vi == "" {
- sarray = append(sarray, keySortableTuple{k, k})
- } else {
- // "When signing you do not encode these values."
- sarray = append(sarray, keySortableTuple{k, k + "=" + vi})
- }
- }
- }
- }
- if len(sarray) > 0 {
- sort.Sort(sarray)
- canonicalPath = canonicalPath + "?" + strings.Join(sarray.StringSlice(), "&")
- }
-
- payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
- hash := hmac.New(sha1.New, []byte(auth.SecretKey))
- hash.Write([]byte(payload))
- signature := make([]byte, b64.EncodedLen(hash.Size()))
- b64.Encode(signature, hash.Sum(nil))
-
- if expires {
- params["Signature"] = []string{string(signature)}
- } else {
- headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
- }
- if debug {
- log.Printf("Signature payload: %q", payload)
- log.Printf("Signature: %q", signature)
- }
-}
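
To make the scheme concrete, a package-internal sketch with placeholder credentials and a fixed Date header: the string-to-sign is method, Content-MD5, Content-Type, Date, the canonicalized x-amz-* headers, and the canonical path; its HMAC-SHA1 ends up in the Authorization header.

    // In-package sketch: sign a bare GET and inspect the result.
    func exampleSign() {
        auth := aws.Auth{AccessKey: "AKEXAMPLE", SecretKey: "secret"} // placeholders
        headers := map[string][]string{
            "Date": {"Thu, 17 Nov 2005 18:49:58 GMT"},
        }
        sign(auth, "GET", "/sample-bucket/hello.txt", nil, headers)
        // headers["Authorization"] now holds "AWS AKEXAMPLE:<base64 HMAC-SHA1>".
        log.Println(headers["Authorization"][0])
    }
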