From 56e74239d6b34df8f30ef046f0b0ff4ff0866a71 Mon Sep 17 00:00:00 2001 From: =Corey Hulen Date: Sun, 14 Jun 2015 23:53:32 -0800 Subject: first commit --- Godeps/Godeps.json | 156 + Godeps/Readme | 5 + Godeps/_workspace/.gitignore | 2 + .../p/draw2d/draw2d/advanced_path.go | 42 + .../src/code.google.com/p/draw2d/draw2d/arc.go | 67 + .../code.google.com/p/draw2d/draw2d/curve/Makefile | 11 + .../code.google.com/p/draw2d/draw2d/curve/arc.go | 36 + .../p/draw2d/draw2d/curve/cubic_float64.go | 67 + .../p/draw2d/draw2d/curve/cubic_float64_others.go | 696 ++++ .../p/draw2d/draw2d/curve/curve_test.go | 262 ++ .../p/draw2d/draw2d/curve/quad_float64.go | 51 + .../src/code.google.com/p/draw2d/draw2d/curves.go | 336 ++ .../src/code.google.com/p/draw2d/draw2d/dasher.go | 90 + .../p/draw2d/draw2d/demux_converter.go | 23 + .../src/code.google.com/p/draw2d/draw2d/doc.go | 5 + .../src/code.google.com/p/draw2d/draw2d/font.go | 97 + .../src/code.google.com/p/draw2d/draw2d/gc.go | 55 + .../src/code.google.com/p/draw2d/draw2d/image.go | 359 +++ .../src/code.google.com/p/draw2d/draw2d/math.go | 52 + .../src/code.google.com/p/draw2d/draw2d/paint.go | 92 + .../src/code.google.com/p/draw2d/draw2d/path.go | 27 + .../code.google.com/p/draw2d/draw2d/path_adder.go | 70 + .../p/draw2d/draw2d/path_converter.go | 173 + .../p/draw2d/draw2d/path_storage.go | 190 ++ .../p/draw2d/draw2d/raster/coverage_table.go | 203 ++ .../p/draw2d/draw2d/raster/fillerAA.go | 320 ++ .../p/draw2d/draw2d/raster/fillerV1/fillerAA.go | 303 ++ .../p/draw2d/draw2d/raster/fillerV2/fillerAA.go | 320 ++ .../p/draw2d/draw2d/raster/fixed_point.go | 17 + .../code.google.com/p/draw2d/draw2d/raster/line.go | 55 + .../p/draw2d/draw2d/raster/polygon.go | 581 ++++ .../p/draw2d/draw2d/raster/raster_test.go | 200 ++ .../p/draw2d/draw2d/rgba_interpolation.go | 150 + .../code.google.com/p/draw2d/draw2d/stack_gc.go | 208 ++ .../src/code.google.com/p/draw2d/draw2d/stroker.go | 135 + .../code.google.com/p/draw2d/draw2d/transform.go | 306 ++ 
.../code.google.com/p/draw2d/draw2d/vertex2d.go | 19 + .../p/freetype-go/freetype/raster/geom.go | 280 ++ .../p/freetype-go/freetype/raster/paint.go | 292 ++ .../p/freetype-go/freetype/raster/raster.go | 579 ++++ .../p/freetype-go/freetype/raster/stroke.go | 466 +++ .../p/freetype-go/freetype/truetype/glyph.go | 530 ++++ .../p/freetype-go/freetype/truetype/hint.go | 1764 +++++++++++ .../p/freetype-go/freetype/truetype/hint_test.go | 673 ++++ .../p/freetype-go/freetype/truetype/opcodes.go | 289 ++ .../p/freetype-go/freetype/truetype/truetype.go | 554 ++++ .../freetype-go/freetype/truetype/truetype_test.go | 366 +++ .../src/code.google.com/p/go-uuid/uuid/LICENSE | 27 + .../src/code.google.com/p/go-uuid/uuid/dce.go | 84 + .../src/code.google.com/p/go-uuid/uuid/doc.go | 8 + .../src/code.google.com/p/go-uuid/uuid/hash.go | 53 + .../src/code.google.com/p/go-uuid/uuid/json.go | 30 + .../code.google.com/p/go-uuid/uuid/json_test.go | 32 + .../src/code.google.com/p/go-uuid/uuid/node.go | 101 + .../src/code.google.com/p/go-uuid/uuid/seq_test.go | 66 + .../src/code.google.com/p/go-uuid/uuid/time.go | 132 + .../src/code.google.com/p/go-uuid/uuid/util.go | 43 + .../src/code.google.com/p/go-uuid/uuid/uuid.go | 163 + .../code.google.com/p/go-uuid/uuid/uuid_test.go | 390 +++ .../src/code.google.com/p/go-uuid/uuid/version1.go | 41 + .../src/code.google.com/p/go-uuid/uuid/version4.go | 25 + .../code.google.com/p/go.crypto/bcrypt/base64.go | 35 + .../code.google.com/p/go.crypto/bcrypt/bcrypt.go | 294 ++ .../p/go.crypto/bcrypt/bcrypt_test.go | 226 ++ .../src/code.google.com/p/log4go/.hgtags | 4 + .../src/code.google.com/p/log4go/LICENSE | 13 + .../_workspace/src/code.google.com/p/log4go/README | 12 + .../src/code.google.com/p/log4go/config.go | 288 ++ .../p/log4go/examples/ConsoleLogWriter_Manual.go | 13 + .../p/log4go/examples/FileLogWriter_Manual.go | 57 + .../p/log4go/examples/SimpleNetLogServer.go | 42 + .../p/log4go/examples/SocketLogWriter_Manual.go | 18 + 
.../p/log4go/examples/XMLConfigurationExample.go | 13 + .../code.google.com/p/log4go/examples/example.xml | 47 + .../src/code.google.com/p/log4go/filelog.go | 239 ++ .../src/code.google.com/p/log4go/log4go.go | 484 +++ .../src/code.google.com/p/log4go/log4go_test.go | 534 ++++ .../src/code.google.com/p/log4go/pattlog.go | 122 + .../src/code.google.com/p/log4go/socklog.go | 57 + .../src/code.google.com/p/log4go/termlog.go | 45 + .../src/code.google.com/p/log4go/wrapper.go | 278 ++ .../src/github.com/anachronistic/apns/.gitignore | 24 + .../src/github.com/anachronistic/apns/LICENSE | 21 + .../src/github.com/anachronistic/apns/client.go | 167 + .../github.com/anachronistic/apns/client_mock.go | 21 + .../anachronistic/apns/client_mock_test.go | 24 + .../src/github.com/anachronistic/apns/feedback.go | 102 + .../src/github.com/anachronistic/apns/legacy.go | 15 + .../github.com/anachronistic/apns/legacy_test.go | 27 + .../anachronistic/apns/mock_feedback_server.go | 53 + .../anachronistic/apns/push_notification.go | 175 + .../apns/push_notification_response.go | 36 + .../anachronistic/apns/push_notification_test.go | 111 + .../awslabs/aws-sdk-go/aws/awsutil/path_value.go | 142 + .../aws-sdk-go/aws/awsutil/path_value_test.go | 60 + .../awslabs/aws-sdk-go/aws/awsutil/string_value.go | 88 + .../github.com/awslabs/aws-sdk-go/aws/config.go | 101 + .../awslabs/aws-sdk-go/aws/credentials.go | 288 ++ .../awslabs/aws-sdk-go/aws/credentials_test.go | 236 ++ .../src/github.com/awslabs/aws-sdk-go/aws/error.go | 26 + .../github.com/awslabs/aws-sdk-go/aws/example.ini | 8 + .../awslabs/aws-sdk-go/aws/handler_functions.go | 78 + .../github.com/awslabs/aws-sdk-go/aws/handlers.go | 65 + .../awslabs/aws-sdk-go/aws/handlers_test.go | 24 + .../awslabs/aws-sdk-go/aws/param_validator.go | 80 + .../awslabs/aws-sdk-go/aws/param_validator_test.go | 85 + .../github.com/awslabs/aws-sdk-go/aws/request.go | 149 + .../awslabs/aws-sdk-go/aws/request_test.go | 118 + 
.../github.com/awslabs/aws-sdk-go/aws/service.go | 142 + .../src/github.com/awslabs/aws-sdk-go/aws/types.go | 63 + .../github.com/awslabs/aws-sdk-go/aws/version.go | 5 + .../aws-sdk-go/internal/endpoints/endpoints.go | 24 + .../aws-sdk-go/internal/endpoints/endpoints.json | 67 + .../aws-sdk-go/internal/endpoints/endpoints_map.go | 78 + .../internal/endpoints/endpoints_test.go | 25 + .../aws-sdk-go/internal/protocol/query/build.go | 30 + .../internal/protocol/query/build_test.go | 1167 +++++++ .../internal/protocol/query/queryutil/queryutil.go | 198 ++ .../internal/protocol/query/unmarshal.go | 26 + .../internal/protocol/query/unmarshal_error.go | 31 + .../internal/protocol/query/unmarshal_test.go | 1361 ++++++++ .../aws-sdk-go/internal/protocol/rest/build.go | 215 ++ .../aws-sdk-go/internal/protocol/rest/payload.go | 43 + .../aws-sdk-go/internal/protocol/rest/unmarshal.go | 174 + .../internal/protocol/restxml/build_test.go | 2571 +++++++++++++++ .../internal/protocol/restxml/restxml.go | 48 + .../internal/protocol/restxml/unmarshal_test.go | 1171 +++++++ .../internal/protocol/xml/xmlutil/build.go | 262 ++ .../internal/protocol/xml/xmlutil/unmarshal.go | 251 ++ .../internal/protocol/xml/xmlutil/xml_to_struct.go | 100 + .../awslabs/aws-sdk-go/internal/signer/v4/v4.go | 296 ++ .../aws-sdk-go/internal/signer/v4/v4_test.go | 89 + .../awslabs/aws-sdk-go/service/route53/api.go | 2738 ++++++++++++++++ .../aws-sdk-go/service/route53/customizations.go | 20 + .../service/route53/customizations_test.go | 20 + .../aws-sdk-go/service/route53/examples_test.go | 714 +++++ .../awslabs/aws-sdk-go/service/route53/service.go | 59 + .../src/github.com/braintree/manners/LICENSE | 19 + .../github.com/braintree/manners/helper_test.go | 34 + .../src/github.com/braintree/manners/listener.go | 49 + .../src/github.com/braintree/manners/server.go | 83 + .../github.com/braintree/manners/server_test.go | 71 + .../src/github.com/go-gorp/gorp/.gitignore | 8 + 
.../src/github.com/go-gorp/gorp/.travis.yml | 23 + .../_workspace/src/github.com/go-gorp/gorp/LICENSE | 22 + .../src/github.com/go-gorp/gorp/Makefile | 6 + .../src/github.com/go-gorp/gorp/dialect.go | 696 ++++ .../src/github.com/go-gorp/gorp/errors.go | 26 + .../_workspace/src/github.com/go-gorp/gorp/gorp.go | 2178 +++++++++++++ .../src/github.com/go-gorp/gorp/gorp_test.go | 2170 +++++++++++++ .../src/github.com/go-gorp/gorp/test_all.sh | 22 + .../src/github.com/go-sql-driver/mysql/.gitignore | 8 + .../src/github.com/go-sql-driver/mysql/.travis.yml | 10 + .../src/github.com/go-sql-driver/mysql/AUTHORS | 42 + .../github.com/go-sql-driver/mysql/CHANGELOG.md | 92 + .../github.com/go-sql-driver/mysql/CONTRIBUTING.md | 40 + .../src/github.com/go-sql-driver/mysql/LICENSE | 373 +++ .../github.com/go-sql-driver/mysql/appengine.go | 19 + .../go-sql-driver/mysql/benchmark_test.go | 246 ++ .../src/github.com/go-sql-driver/mysql/buffer.go | 136 + .../github.com/go-sql-driver/mysql/collations.go | 250 ++ .../github.com/go-sql-driver/mysql/connection.go | 402 +++ .../src/github.com/go-sql-driver/mysql/const.go | 162 + .../src/github.com/go-sql-driver/mysql/driver.go | 140 + .../github.com/go-sql-driver/mysql/driver_test.go | 1657 ++++++++++ .../src/github.com/go-sql-driver/mysql/errors.go | 129 + .../github.com/go-sql-driver/mysql/errors_test.go | 42 + .../src/github.com/go-sql-driver/mysql/infile.go | 162 + .../src/github.com/go-sql-driver/mysql/packets.go | 1138 +++++++ .../src/github.com/go-sql-driver/mysql/result.go | 22 + .../src/github.com/go-sql-driver/mysql/rows.go | 102 + .../github.com/go-sql-driver/mysql/statement.go | 149 + .../github.com/go-sql-driver/mysql/transaction.go | 31 + .../src/github.com/go-sql-driver/mysql/utils.go | 963 ++++++ .../github.com/go-sql-driver/mysql/utils_test.go | 346 ++ .../src/github.com/goamz/goamz/aws/attempt.go | 74 + .../src/github.com/goamz/goamz/aws/attempt_test.go | 58 + .../src/github.com/goamz/goamz/aws/aws.go | 431 +++ 
.../src/github.com/goamz/goamz/aws/aws_test.go | 211 ++ .../src/github.com/goamz/goamz/aws/client.go | 124 + .../src/github.com/goamz/goamz/aws/client_test.go | 121 + .../src/github.com/goamz/goamz/aws/export_test.go | 29 + .../src/github.com/goamz/goamz/aws/regions.go | 243 ++ .../src/github.com/goamz/goamz/aws/sign.go | 357 +++ .../src/github.com/goamz/goamz/aws/sign_test.go | 525 +++ .../src/github.com/goamz/goamz/s3/export_test.go | 17 + .../src/github.com/goamz/goamz/s3/multi.go | 409 +++ .../src/github.com/goamz/goamz/s3/multi_test.go | 371 +++ .../github.com/goamz/goamz/s3/responses_test.go | 202 ++ .../_workspace/src/github.com/goamz/goamz/s3/s3.go | 1151 +++++++ .../src/github.com/goamz/goamz/s3/s3_test.go | 427 +++ .../src/github.com/goamz/goamz/s3/s3i_test.go | 590 ++++ .../src/github.com/goamz/goamz/s3/s3t_test.go | 79 + .../src/github.com/goamz/goamz/s3/s3test/server.go | 629 ++++ .../src/github.com/goamz/goamz/s3/sign.go | 114 + .../src/github.com/goamz/goamz/s3/sign_test.go | 132 + .../src/github.com/gorilla/context/.travis.yml | 9 + .../src/github.com/gorilla/context/LICENSE | 27 + .../src/github.com/gorilla/context/context.go | 143 + .../src/github.com/gorilla/context/context_test.go | 161 + .../src/github.com/gorilla/context/doc.go | 82 + .../src/github.com/gorilla/mux/.travis.yml | 7 + .../_workspace/src/github.com/gorilla/mux/LICENSE | 27 + .../src/github.com/gorilla/mux/bench_test.go | 21 + .../_workspace/src/github.com/gorilla/mux/doc.go | 199 ++ .../_workspace/src/github.com/gorilla/mux/mux.go | 366 +++ .../src/github.com/gorilla/mux/mux_test.go | 1012 ++++++ .../src/github.com/gorilla/mux/old_test.go | 714 +++++ .../src/github.com/gorilla/mux/regexp.go | 272 ++ .../_workspace/src/github.com/gorilla/mux/route.go | 571 ++++ .../src/github.com/gorilla/websocket/.gitignore | 22 + .../src/github.com/gorilla/websocket/.travis.yml | 6 + .../src/github.com/gorilla/websocket/AUTHORS | 8 + .../src/github.com/gorilla/websocket/LICENSE | 22 + 
.../src/github.com/gorilla/websocket/bench_test.go | 19 + .../src/github.com/gorilla/websocket/client.go | 264 ++ .../gorilla/websocket/client_server_test.go | 323 ++ .../github.com/gorilla/websocket/client_test.go | 63 + .../src/github.com/gorilla/websocket/conn.go | 825 +++++ .../src/github.com/gorilla/websocket/conn_test.go | 238 ++ .../src/github.com/gorilla/websocket/doc.go | 148 + .../websocket/examples/autobahn/fuzzingclient.json | 14 + .../gorilla/websocket/examples/autobahn/server.go | 246 ++ .../gorilla/websocket/examples/chat/conn.go | 106 + .../gorilla/websocket/examples/chat/home.html | 92 + .../gorilla/websocket/examples/chat/hub.go | 51 + .../gorilla/websocket/examples/chat/main.go | 39 + .../gorilla/websocket/examples/filewatch/main.go | 193 ++ .../src/github.com/gorilla/websocket/json.go | 57 + .../src/github.com/gorilla/websocket/json_test.go | 119 + .../src/github.com/gorilla/websocket/server.go | 247 ++ .../github.com/gorilla/websocket/server_test.go | 33 + .../src/github.com/gorilla/websocket/util.go | 44 + .../src/github.com/gorilla/websocket/util_test.go | 34 + .../src/github.com/huandu/facebook/CHANGELOG.md | 47 + .../src/github.com/huandu/facebook/CONTRIBUTING.md | 7 + .../src/github.com/huandu/facebook/LICENSE | 19 + .../src/github.com/huandu/facebook/api.go | 180 ++ .../src/github.com/huandu/facebook/app.go | 255 ++ .../src/github.com/huandu/facebook/batch_result.go | 52 + .../src/github.com/huandu/facebook/const.go | 74 + .../github.com/huandu/facebook/facebook_test.go | 1469 +++++++++ .../src/github.com/huandu/facebook/misc.go | 131 + .../github.com/huandu/facebook/paging_result.go | 146 + .../src/github.com/huandu/facebook/params.go | 227 ++ .../src/github.com/huandu/facebook/result.go | 1097 +++++++ .../src/github.com/huandu/facebook/session.go | 667 ++++ .../src/github.com/huandu/facebook/type.go | 127 + .../src/github.com/mssola/user_agent/.travis.yml | 11 + .../src/github.com/mssola/user_agent/LICENSE | 20 + 
.../src/github.com/mssola/user_agent/all_test.go | 257 ++ .../src/github.com/mssola/user_agent/bot.go | 121 + .../src/github.com/mssola/user_agent/browser.go | 120 + .../mssola/user_agent/operating_systems.go | 260 ++ .../src/github.com/mssola/user_agent/user_agent.go | 169 + .../src/github.com/nfnt/resize/.travis.yml | 7 + .../_workspace/src/github.com/nfnt/resize/LICENSE | 13 + .../src/github.com/nfnt/resize/converter.go | 452 +++ .../src/github.com/nfnt/resize/converter_test.go | 43 + .../src/github.com/nfnt/resize/filters.go | 143 + .../src/github.com/nfnt/resize/nearest.go | 318 ++ .../src/github.com/nfnt/resize/nearest_test.go | 57 + .../src/github.com/nfnt/resize/resize.go | 614 ++++ .../src/github.com/nfnt/resize/resize_test.go | 224 ++ .../src/github.com/nfnt/resize/thumbnail.go | 55 + .../src/github.com/nfnt/resize/thumbnail_test.go | 47 + .../_workspace/src/github.com/nfnt/resize/ycc.go | 227 ++ .../src/github.com/nfnt/resize/ycc_test.go | 214 ++ .../src/github.com/stretchr/objx/.gitignore | 22 + .../src/github.com/stretchr/objx/LICENSE.md | 23 + .../src/github.com/stretchr/objx/accessors.go | 179 ++ .../src/github.com/stretchr/objx/accessors_test.go | 145 + .../stretchr/objx/codegen/array-access.txt | 14 + .../github.com/stretchr/objx/codegen/index.html | 86 + .../github.com/stretchr/objx/codegen/template.txt | 286 ++ .../stretchr/objx/codegen/types_list.txt | 20 + .../src/github.com/stretchr/objx/constants.go | 13 + .../src/github.com/stretchr/objx/conversions.go | 117 + .../github.com/stretchr/objx/conversions_test.go | 94 + .../_workspace/src/github.com/stretchr/objx/doc.go | 72 + .../src/github.com/stretchr/objx/fixture_test.go | 98 + .../_workspace/src/github.com/stretchr/objx/map.go | 222 ++ .../src/github.com/stretchr/objx/map_for_test.go | 10 + .../src/github.com/stretchr/objx/map_test.go | 147 + .../src/github.com/stretchr/objx/mutations.go | 81 + .../src/github.com/stretchr/objx/mutations_test.go | 77 + 
.../src/github.com/stretchr/objx/security.go | 14 + .../src/github.com/stretchr/objx/security_test.go | 12 + .../stretchr/objx/simple_example_test.go | 41 + .../src/github.com/stretchr/objx/tests.go | 17 + .../src/github.com/stretchr/objx/tests_test.go | 24 + .../stretchr/objx/type_specific_codegen.go | 2881 +++++++++++++++++ .../stretchr/objx/type_specific_codegen_test.go | 2867 +++++++++++++++++ .../src/github.com/stretchr/objx/value.go | 13 + .../src/github.com/stretchr/objx/value_test.go | 1 + .../stretchr/testify/assert/assertions.go | 853 +++++ .../stretchr/testify/assert/assertions_test.go | 791 +++++ .../src/github.com/stretchr/testify/assert/doc.go | 154 + .../github.com/stretchr/testify/assert/errors.go | 10 + .../stretchr/testify/assert/forward_assertions.go | 265 ++ .../testify/assert/forward_assertions_test.go | 511 +++ .../stretchr/testify/assert/http_assertions.go | 157 + .../testify/assert/http_assertions_test.go | 86 + .../src/github.com/stretchr/testify/mock/doc.go | 43 + .../src/github.com/stretchr/testify/mock/mock.go | 566 ++++ .../github.com/stretchr/testify/mock/mock_test.go | 843 +++++ .../src/github.com/vaughan0/go-ini/LICENSE | 14 + .../src/github.com/vaughan0/go-ini/ini.go | 123 + .../github.com/vaughan0/go-ini/ini_linux_test.go | 43 + .../src/github.com/vaughan0/go-ini/ini_test.go | 89 + .../src/github.com/vaughan0/go-ini/test.ini | 2 + .../src/golang.org/x/crypto/blowfish/block.go | 159 + .../golang.org/x/crypto/blowfish/blowfish_test.go | 274 ++ .../src/golang.org/x/crypto/blowfish/cipher.go | 91 + .../src/golang.org/x/crypto/blowfish/const.go | 199 ++ .../_workspace/src/gopkg.in/bufio.v1/.travis.yml | 11 + Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE | 27 + Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile | 2 + Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go | 413 +++ .../src/gopkg.in/bufio.v1/buffer_test.go | 527 ++++ Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go | 728 +++++ .../_workspace/src/gopkg.in/bufio.v1/bufio_test.go 
| 1418 +++++++++ .../src/gopkg.in/bufio.v1/export_test.go | 9 + .../_workspace/src/gopkg.in/fsnotify.v1/.gitignore | 6 + .../src/gopkg.in/fsnotify.v1/.travis.yml | 15 + Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS | 34 + .../src/gopkg.in/fsnotify.v1/CHANGELOG.md | 263 ++ .../src/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 + Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE | 28 + .../src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace | 0 .../_workspace/src/gopkg.in/fsnotify.v1/circle.yml | 26 + .../src/gopkg.in/fsnotify.v1/example_test.go | 42 + .../src/gopkg.in/fsnotify.v1/fsnotify.go | 62 + .../_workspace/src/gopkg.in/fsnotify.v1/inotify.go | 306 ++ .../src/gopkg.in/fsnotify.v1/inotify_poller.go | 186 ++ .../gopkg.in/fsnotify.v1/inotify_poller_test.go | 228 ++ .../src/gopkg.in/fsnotify.v1/inotify_test.go | 292 ++ .../src/gopkg.in/fsnotify.v1/integration_test.go | 1135 +++++++ .../_workspace/src/gopkg.in/fsnotify.v1/kqueue.go | 463 +++ .../src/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 + .../src/gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 + .../_workspace/src/gopkg.in/fsnotify.v1/windows.go | 561 ++++ .../_workspace/src/gopkg.in/redis.v2/.travis.yml | 19 + Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE | 27 + Godeps/_workspace/src/gopkg.in/redis.v2/Makefile | 3 + Godeps/_workspace/src/gopkg.in/redis.v2/command.go | 597 ++++ .../_workspace/src/gopkg.in/redis.v2/commands.go | 1246 ++++++++ Godeps/_workspace/src/gopkg.in/redis.v2/doc.go | 4 + Godeps/_workspace/src/gopkg.in/redis.v2/error.go | 23 + .../src/gopkg.in/redis.v2/example_test.go | 180 ++ .../src/gopkg.in/redis.v2/export_test.go | 5 + Godeps/_workspace/src/gopkg.in/redis.v2/multi.go | 138 + Godeps/_workspace/src/gopkg.in/redis.v2/parser.go | 262 ++ .../src/gopkg.in/redis.v2/parser_test.go | 54 + .../_workspace/src/gopkg.in/redis.v2/pipeline.go | 91 + Godeps/_workspace/src/gopkg.in/redis.v2/pool.go | 405 +++ Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go | 134 + 
.../_workspace/src/gopkg.in/redis.v2/rate_limit.go | 53 + .../src/gopkg.in/redis.v2/rate_limit_test.go | 31 + Godeps/_workspace/src/gopkg.in/redis.v2/redis.go | 231 ++ .../_workspace/src/gopkg.in/redis.v2/redis_test.go | 3333 ++++++++++++++++++++ Godeps/_workspace/src/gopkg.in/redis.v2/script.go | 52 + .../_workspace/src/gopkg.in/redis.v2/sentinel.go | 291 ++ .../src/gopkg.in/redis.v2/sentinel_test.go | 185 ++ .../src/gopkg.in/redis.v2/testdata/sentinel.conf | 6 + 365 files changed, 90059 insertions(+) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/advanced_path.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/arc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/Makefile create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/arc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64_others.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/curve_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/quad_float64.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curves.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/dasher.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/demux_converter.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/doc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/font.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/gc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/image.go create mode 100644 
Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/math.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/paint.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_adder.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_converter.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_storage.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/coverage_table.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerAA.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV1/fillerAA.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV2/fillerAA.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fixed_point.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/line.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/polygon.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/raster_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/rgba_interpolation.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stack_gc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stroker.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/transform.go create mode 100644 Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/vertex2d.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/geom.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/paint.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/raster.go create mode 100644 
Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/stroke.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/glyph.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/opcodes.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype.go create mode 100644 Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/base64.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt.go 
create mode 100644 Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/.hgtags create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/LICENSE create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/README create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/config.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/examples/FileLogWriter_Manual.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/examples/SimpleNetLogServer.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/examples/XMLConfigurationExample.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/examples/example.xml create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/filelog.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/log4go.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/log4go_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/pattlog.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/socklog.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/termlog.go create mode 100644 Godeps/_workspace/src/code.google.com/p/log4go/wrapper.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/.gitignore create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/LICENSE create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/client.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/client_mock.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/client_mock_test.go create mode 100644 
Godeps/_workspace/src/github.com/anachronistic/apns/feedback.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/legacy.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/legacy_test.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/mock_feedback_server.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/push_notification.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/push_notification_response.go create mode 100644 Godeps/_workspace/src/github.com/anachronistic/apns/push_notification_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/path_value_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/config.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/error.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/example.ini create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handler_functions.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request_test.go 
create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/service.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/types.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/version.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.json create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_map.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_error.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/build.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/payload.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/unmarshal.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/build_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/restxml.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go 
create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/build.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/api.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/examples_test.go create mode 100644 Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/service.go create mode 100644 Godeps/_workspace/src/github.com/braintree/manners/LICENSE create mode 100644 Godeps/_workspace/src/github.com/braintree/manners/helper_test.go create mode 100644 Godeps/_workspace/src/github.com/braintree/manners/listener.go create mode 100644 Godeps/_workspace/src/github.com/braintree/manners/server.go create mode 100644 Godeps/_workspace/src/github.com/braintree/manners/server_test.go create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/.gitignore create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/LICENSE create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/Makefile create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/dialect.go create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/errors.go create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/gorp.go create mode 
100644 Godeps/_workspace/src/github.com/go-gorp/gorp/gorp_test.go create mode 100644 Godeps/_workspace/src/github.com/go-gorp/gorp/test_all.sh create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go create 
mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go create mode 100644 Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/LICENSE create mode 
100644 Godeps/_workspace/src/github.com/gorilla/context/context.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/old_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/regexp.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/route.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/client.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/conn.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/conn_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go 
create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/json.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/server.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/util.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/LICENSE create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/api.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/app.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/batch_result.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/const.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/facebook_test.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/misc.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/paging_result.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/params.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/result.go create mode 100644 Godeps/_workspace/src/github.com/huandu/facebook/session.go create mode 
100644 Godeps/_workspace/src/github.com/huandu/facebook/type.go create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/LICENSE create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/all_test.go create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/bot.go create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/browser.go create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/operating_systems.go create mode 100644 Godeps/_workspace/src/github.com/mssola/user_agent/user_agent.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/LICENSE create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/converter.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/converter_test.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/filters.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/nearest.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/nearest_test.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/resize.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/resize_test.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/thumbnail.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/thumbnail_test.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/ycc.go create mode 100644 Godeps/_workspace/src/github.com/nfnt/resize/ycc_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/.gitignore create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go create mode 100644 
Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/constants.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go create mode 100644 
Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go create mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE create mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go create mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go create mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go create mode 100644 
Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/Makefile create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/command.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/commands.go 
create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/doc.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/error.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/multi.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/parser.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/pool.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/redis.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/script.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf (limited to 'Godeps') diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 000000000..87207c5e1 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,156 @@ +{ + "ImportPath": "github.com/mattermost/platform", + "GoVersion": "go1.4", + "Deps": [ + { + "ImportPath": "code.google.com/p/draw2d/draw2d", + "Comment": "release-20", + "Rev": "eaeb833648eee1b7c20e77ffc8180646c0395298" + }, + { + "ImportPath": "code.google.com/p/freetype-go/freetype/raster", + "Comment": "release-85", + "Rev": "46c3056cafbb4da11c4087a892c7d2bfa4224a8f" + }, + { + "ImportPath": "code.google.com/p/freetype-go/freetype/truetype", + "Comment": "release-85", + "Rev": 
"46c3056cafbb4da11c4087a892c7d2bfa4224a8f" + }, + { + "ImportPath": "code.google.com/p/go-uuid/uuid", + "Comment": "null-15", + "Rev": "35bc42037350f0078e3c974c6ea690f1926603ab" + }, + { + "ImportPath": "code.google.com/p/go.crypto/bcrypt", + "Comment": "null-236", + "Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e" + }, + { + "ImportPath": "code.google.com/p/log4go", + "Comment": "go.weekly.2012-02-22-1", + "Rev": "c3294304d93f48a37d3bed1d382882a9c2989f99" + }, + { + "ImportPath": "github.com/anachronistic/apns", + "Rev": "f90152b7e0ae845938ce1258d90072fd116131ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/aws", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/internal/endpoints", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/internal/protocol/query", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/internal/protocol/rest", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/internal/protocol/restxml", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/internal/signer/v4", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/service/route53", + "Rev": "44bfbd5e852715e1acfcd49da3f5688e4b5377ac" + }, + { + "ImportPath": "github.com/braintree/manners", + "Comment": "0.3.1-2-g5280e25", + "Rev": "5280e250f2795914acbeb2bf3b55dd5a2d1fba52" + }, + { + "ImportPath": "github.com/go-gorp/gorp", + "Comment": "v1.7-65-g3c15f67", + "Rev": "3c15f6739b94dc357e2797a7297a2853ec79f4fa" + }, + { + "ImportPath": "github.com/go-sql-driver/mysql", + "Comment": "v1.2-97-g0cc29e9", + "Rev": 
"0cc29e9fe8e25c2c58cf47bcab566e029bbaa88b" + }, + { + "ImportPath": "github.com/goamz/goamz/aws", + "Rev": "ad637a587dd8314770a1084481dd7b5d4fa1232f" + }, + { + "ImportPath": "github.com/goamz/goamz/s3", + "Rev": "ad637a587dd8314770a1084481dd7b5d4fa1232f" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "94903de8c98a68d8b4483c529b26a5d146e386a2" + }, + { + "ImportPath": "github.com/gorilla/websocket", + "Rev": "6fd0f867fef40c540fa05c59f86396de10a632a6" + }, + { + "ImportPath": "github.com/huandu/facebook", + "Comment": "v1.5.2", + "Rev": "7c46d89211b9a3f01087f47f6a399b815801ade4" + }, + { + "ImportPath": "github.com/mssola/user_agent", + "Comment": "v0.4.1-2-g35c7f18", + "Rev": "35c7f18f5261cc18c698a461053c119aebaf8542" + }, + { + "ImportPath": "github.com/nfnt/resize", + "Rev": "f2d1b73023c55bdfb4fa2ed385d2bec95bce3692" + }, + { + "ImportPath": "github.com/stretchr/objx", + "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" + }, + { + "ImportPath": "github.com/stretchr/testify/assert", + "Rev": "dab07ac62d4905d3e48d17dc549c684ac3b7c15a" + }, + { + "ImportPath": "github.com/stretchr/testify/mock", + "Rev": "dab07ac62d4905d3e48d17dc549c684ac3b7c15a" + }, + { + "ImportPath": "github.com/vaughan0/go-ini", + "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "74f810a0152f4c50a16195f6b9ff44afc35594e8" + }, + { + "ImportPath": "gopkg.in/bufio.v1", + "Comment": "v1", + "Rev": "567b2bfa514e796916c4747494d6ff5132a1dfce" + }, + { + "ImportPath": "gopkg.in/fsnotify.v1", + "Comment": "v1.2.0", + "Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0" + }, + { + "ImportPath": "gopkg.in/redis.v2", + "Comment": "v2.3.2", + "Rev": "e6179049628164864e6e84e973cfb56335748dea" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/Godeps/Readme 
@@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 000000000..f037d684e --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/advanced_path.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/advanced_path.go new file mode 100644 index 000000000..68f1d782b --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/advanced_path.go @@ -0,0 +1,42 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +package draw2d + +import ( + "math" +) + +//high level path creation + +func Rect(path Path, x1, y1, x2, y2 float64) { + path.MoveTo(x1, y1) + path.LineTo(x2, y1) + path.LineTo(x2, y2) + path.LineTo(x1, y2) + path.Close() +} + +func RoundRect(path Path, x1, y1, x2, y2, arcWidth, arcHeight float64) { + arcWidth = arcWidth / 2 + arcHeight = arcHeight / 2 + path.MoveTo(x1, y1+arcHeight) + path.QuadCurveTo(x1, y1, x1+arcWidth, y1) + path.LineTo(x2-arcWidth, y1) + path.QuadCurveTo(x2, y1, x2, y1+arcHeight) + path.LineTo(x2, y2-arcHeight) + path.QuadCurveTo(x2, y2, x2-arcWidth, y2) + path.LineTo(x1+arcWidth, y2) + path.QuadCurveTo(x1, y2, x1, y2-arcHeight) + path.Close() +} + +func Ellipse(path Path, cx, cy, rx, ry float64) { + path.ArcTo(cx, cy, rx, ry, 0, -math.Pi*2) + path.Close() +} + +func Circle(path Path, cx, cy, radius float64) { + path.ArcTo(cx, cy, radius, radius, 0, -math.Pi*2) + path.Close() +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/arc.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/arc.go new file mode 100644 index 000000000..0698b8da0 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/arc.go @@ -0,0 +1,67 @@ +// Copyright 2010 The 
draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "code.google.com/p/freetype-go/freetype/raster" + "math" +) + +func arc(t VertexConverter, x, y, rx, ry, start, angle, scale float64) (lastX, lastY float64) { + end := start + angle + clockWise := true + if angle < 0 { + clockWise = false + } + ra := (math.Abs(rx) + math.Abs(ry)) / 2 + da := math.Acos(ra/(ra+0.125/scale)) * 2 + //normalize + if !clockWise { + da = -da + } + angle = start + da + var curX, curY float64 + for { + if (angle < end-da/4) != clockWise { + curX = x + math.Cos(end)*rx + curY = y + math.Sin(end)*ry + return curX, curY + } + curX = x + math.Cos(angle)*rx + curY = y + math.Sin(angle)*ry + + angle += da + t.Vertex(curX, curY) + } + return curX, curY +} + +func arcAdder(adder raster.Adder, x, y, rx, ry, start, angle, scale float64) raster.Point { + end := start + angle + clockWise := true + if angle < 0 { + clockWise = false + } + ra := (math.Abs(rx) + math.Abs(ry)) / 2 + da := math.Acos(ra/(ra+0.125/scale)) * 2 + //normalize + if !clockWise { + da = -da + } + angle = start + da + var curX, curY float64 + for { + if (angle < end-da/4) != clockWise { + curX = x + math.Cos(end)*rx + curY = y + math.Sin(end)*ry + return raster.Point{raster.Fix32(curX * 256), raster.Fix32(curY * 256)} + } + curX = x + math.Cos(angle)*rx + curY = y + math.Sin(angle)*ry + + angle += da + adder.Add1(raster.Point{raster.Fix32(curX * 256), raster.Fix32(curY * 256)}) + } + return raster.Point{raster.Fix32(curX * 256), raster.Fix32(curY * 256)} +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/Makefile b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/Makefile new file mode 100644 index 000000000..15ceee070 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/Makefile @@ -0,0 +1,11 @@ +include $(GOROOT)/src/Make.inc + +TARG=draw2d.googlecode.com/hg/draw2d/curve +GOFILES=\ + cubic_float64.go\ + 
quad_float64.go\ + cubic_float64_others.go\ + + + +include $(GOROOT)/src/Make.pkg diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/arc.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/arc.go new file mode 100644 index 000000000..92850e979 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/arc.go @@ -0,0 +1,36 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff +package curve + +import ( + "math" +) + +func SegmentArc(t LineTracer, x, y, rx, ry, start, angle, scale float64) { + end := start + angle + clockWise := true + if angle < 0 { + clockWise = false + } + ra := (math.Abs(rx) + math.Abs(ry)) / 2 + da := math.Acos(ra/(ra+0.125/scale)) * 2 + //normalize + if !clockWise { + da = -da + } + angle = start + da + var curX, curY float64 + for { + if (angle < end-da/4) != clockWise { + curX = x + math.Cos(end)*rx + curY = y + math.Sin(end)*ry + break; + } + curX = x + math.Cos(angle)*rx + curY = y + math.Sin(angle)*ry + + angle += da + t.LineTo(curX, curY) + } + t.LineTo(curX, curY) +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64.go new file mode 100644 index 000000000..64a7ac639 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64.go @@ -0,0 +1,67 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 17/05/2011 by Laurent Le Goff +package curve + +import ( + "math" +) + +const ( + CurveRecursionLimit = 32 +) + +// X1, Y1, X2, Y2, X3, Y3, X4, Y4 float64 +type CubicCurveFloat64 [8]float64 + +type LineTracer interface { + LineTo(x, y float64) +} + +func (c *CubicCurveFloat64) Subdivide(c1, c2 *CubicCurveFloat64) (x23, y23 float64) { + // Calculate all the mid-points of the line segments + //---------------------- + c1[0], c1[1] = c[0], c[1] + c2[6], c2[7] = c[6], c[7] + c1[2] = (c[0] + c[2]) / 2 + c1[3] = (c[1] + c[3]) / 2 + x23 = (c[2] + c[4]) / 2 + y23 = (c[3] + c[5]) / 2 + c2[4] = (c[4] + c[6]) / 2 + c2[5] = (c[5] + c[7]) / 2 + c1[4] = (c1[2] + x23) / 2 + c1[5] = (c1[3] + y23) / 2 + c2[2] = (x23 + c2[4]) / 2 + c2[3] = (y23 + c2[5]) / 2 + c1[6] = (c1[4] + c2[2]) / 2 + c1[7] = (c1[5] + c2[3]) / 2 + c2[0], c2[1] = c1[6], c1[7] + return +} + +func (curve *CubicCurveFloat64) Segment(t LineTracer, flattening_threshold float64) { + var curves [CurveRecursionLimit]CubicCurveFloat64 + curves[0] = *curve + i := 0 + // current curve + var c *CubicCurveFloat64 + + var dx, dy, d2, d3 float64 + + for i >= 0 { + c = &curves[i] + dx = c[6] - c[0] + dy = c[7] - c[1] + + d2 = math.Abs(((c[2]-c[6])*dy - (c[3]-c[7])*dx)) + d3 = math.Abs(((c[4]-c[6])*dy - (c[5]-c[7])*dx)) + + if (d2+d3)*(d2+d3) < flattening_threshold*(dx*dx+dy*dy) || i == len(curves)-1 { + t.LineTo(c[6], c[7]) + i-- + } else { + // second half of bezier go lower onto the stack + c.Subdivide(&curves[i+1], &curves[i]) + i++ + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64_others.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64_others.go new file mode 100644 index 000000000..a888b22a1 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/cubic_float64_others.go @@ -0,0 +1,696 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 17/05/2011 by Laurent Le Goff +package curve + +import ( + "math" +) + +const ( + CurveCollinearityEpsilon = 1e-30 + CurveAngleToleranceEpsilon = 0.01 +) + +//mu ranges from 0 to 1, start to end of curve +func (c *CubicCurveFloat64) ArbitraryPoint(mu float64) (x, y float64) { + + mum1 := 1 - mu + mum13 := mum1 * mum1 * mum1 + mu3 := mu * mu * mu + + x = mum13*c[0] + 3*mu*mum1*mum1*c[2] + 3*mu*mu*mum1*c[4] + mu3*c[6] + y = mum13*c[1] + 3*mu*mum1*mum1*c[3] + 3*mu*mu*mum1*c[5] + mu3*c[7] + return +} + +func (c *CubicCurveFloat64) SubdivideAt(c1, c2 *CubicCurveFloat64, t float64) (x23, y23 float64) { + inv_t := (1 - t) + c1[0], c1[1] = c[0], c[1] + c2[6], c2[7] = c[6], c[7] + + c1[2] = inv_t*c[0] + t*c[2] + c1[3] = inv_t*c[1] + t*c[3] + + x23 = inv_t*c[2] + t*c[4] + y23 = inv_t*c[3] + t*c[5] + + c2[4] = inv_t*c[4] + t*c[6] + c2[5] = inv_t*c[5] + t*c[7] + + c1[4] = inv_t*c1[2] + t*x23 + c1[5] = inv_t*c1[3] + t*y23 + + c2[2] = inv_t*x23 + t*c2[4] + c2[3] = inv_t*y23 + t*c2[5] + + c1[6] = inv_t*c1[4] + t*c2[2] + c1[7] = inv_t*c1[5] + t*c2[3] + + c2[0], c2[1] = c1[6], c1[7] + return +} + +func (c *CubicCurveFloat64) EstimateDistance() float64 { + dx1 := c[2] - c[0] + dy1 := c[3] - c[1] + dx2 := c[4] - c[2] + dy2 := c[5] - c[3] + dx3 := c[6] - c[4] + dy3 := c[7] - c[5] + return math.Sqrt(dx1*dx1+dy1*dy1) + math.Sqrt(dx2*dx2+dy2*dy2) + math.Sqrt(dx3*dx3+dy3*dy3) +} + +// subdivide the curve in straight lines using line approximation and Casteljau recursive subdivision +func (c *CubicCurveFloat64) SegmentRec(t LineTracer, flattening_threshold float64) { + c.segmentRec(t, flattening_threshold) + t.LineTo(c[6], c[7]) +} + +func (c *CubicCurveFloat64) segmentRec(t LineTracer, flattening_threshold float64) { + var c1, c2 CubicCurveFloat64 + c.Subdivide(&c1, &c2) + + // Try to approximate the full cubic curve by a single straight line + //------------------ + dx := c[6] - c[0] + dy := c[7] - c[1] + + d2 := math.Abs(((c[2]-c[6])*dy - (c[3]-c[7])*dx)) + d3 := 
math.Abs(((c[4]-c[6])*dy - (c[5]-c[7])*dx)) + + if (d2+d3)*(d2+d3) < flattening_threshold*(dx*dx+dy*dy) { + t.LineTo(c[6], c[7]) + return + } + // Continue subdivision + //---------------------- + c1.segmentRec(t, flattening_threshold) + c2.segmentRec(t, flattening_threshold) +} + +/* + The function has the following parameters: + approximationScale : + Eventually determines the approximation accuracy. In practice we need to transform points from the World coordinate system to the Screen one. + It always has some scaling coefficient. + The curves are usually processed in the World coordinates, while the approximation accuracy should be eventually in pixels. + Usually it looks as follows: + curved.approximationScale(transform.scale()); + where transform is the affine matrix that includes all the transformations, including viewport and zoom. + angleTolerance : + You set it in radians. + The less this value is the more accurate will be the approximation at sharp turns. + But 0 means that we don't consider angle conditions at all. + cuspLimit : + An angle in radians. + If 0, only the real cusps will have bevel cuts. + If more than 0, it will restrict the sharpness. + The more this value is the less sharp turns will be cut. + Typically it should not exceed 10-15 degrees. 
+*/ +func (c *CubicCurveFloat64) AdaptiveSegmentRec(t LineTracer, approximationScale, angleTolerance, cuspLimit float64) { + cuspLimit = computeCuspLimit(cuspLimit) + distanceToleranceSquare := 0.5 / approximationScale + distanceToleranceSquare = distanceToleranceSquare * distanceToleranceSquare + c.adaptiveSegmentRec(t, 0, distanceToleranceSquare, angleTolerance, cuspLimit) + t.LineTo(c[6], c[7]) +} + +func computeCuspLimit(v float64) (r float64) { + if v == 0.0 { + r = 0.0 + } else { + r = math.Pi - v + } + return +} + +func squareDistance(x1, y1, x2, y2 float64) float64 { + dx := x2 - x1 + dy := y2 - y1 + return dx*dx + dy*dy +} + +/** + * http://www.antigrain.com/research/adaptive_bezier/index.html + */ +func (c *CubicCurveFloat64) adaptiveSegmentRec(t LineTracer, level int, distanceToleranceSquare, angleTolerance, cuspLimit float64) { + if level > CurveRecursionLimit { + return + } + var c1, c2 CubicCurveFloat64 + x23, y23 := c.Subdivide(&c1, &c2) + + // Try to approximate the full cubic curve by a single straight line + //------------------ + dx := c[6] - c[0] + dy := c[7] - c[1] + + d2 := math.Abs(((c[2]-c[6])*dy - (c[3]-c[7])*dx)) + d3 := math.Abs(((c[4]-c[6])*dy - (c[5]-c[7])*dx)) + switch { + case d2 <= CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: + // All collinear OR p1==p4 + //---------------------- + k := dx*dx + dy*dy + if k == 0 { + d2 = squareDistance(c[0], c[1], c[2], c[3]) + d3 = squareDistance(c[6], c[7], c[4], c[5]) + } else { + k = 1 / k + da1 := c[2] - c[0] + da2 := c[3] - c[1] + d2 = k * (da1*dx + da2*dy) + da1 = c[4] - c[0] + da2 = c[5] - c[1] + d3 = k * (da1*dx + da2*dy) + if d2 > 0 && d2 < 1 && d3 > 0 && d3 < 1 { + // Simple collinear case, 1---2---3---4 + // We can leave just two endpoints + return + } + if d2 <= 0 { + d2 = squareDistance(c[2], c[3], c[0], c[1]) + } else if d2 >= 1 { + d2 = squareDistance(c[2], c[3], c[6], c[7]) + } else { + d2 = squareDistance(c[2], c[3], c[0]+d2*dx, c[1]+d2*dy) + } + + if d3 <= 0 { + d3 
= squareDistance(c[4], c[5], c[0], c[1]) + } else if d3 >= 1 { + d3 = squareDistance(c[4], c[5], c[6], c[7]) + } else { + d3 = squareDistance(c[4], c[5], c[0]+d3*dx, c[1]+d3*dy) + } + } + if d2 > d3 { + if d2 < distanceToleranceSquare { + t.LineTo(c[2], c[3]) + return + } + } else { + if d3 < distanceToleranceSquare { + t.LineTo(c[4], c[5]) + return + } + } + + case d2 <= CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: + // p1,p2,p4 are collinear, p3 is significant + //---------------------- + if d3*d3 <= distanceToleranceSquare*(dx*dx+dy*dy) { + if angleTolerance < CurveAngleToleranceEpsilon { + t.LineTo(x23, y23) + return + } + + // Angle Condition + //---------------------- + da1 := math.Abs(math.Atan2(c[7]-c[5], c[6]-c[4]) - math.Atan2(c[5]-c[3], c[4]-c[2])) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + + if da1 < angleTolerance { + t.LineTo(c[2], c[3]) + t.LineTo(c[4], c[5]) + return + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + t.LineTo(c[4], c[5]) + return + } + } + } + + case d2 > CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: + // p1,p3,p4 are collinear, p2 is significant + //---------------------- + if d2*d2 <= distanceToleranceSquare*(dx*dx+dy*dy) { + if angleTolerance < CurveAngleToleranceEpsilon { + t.LineTo(x23, y23) + return + } + + // Angle Condition + //---------------------- + da1 := math.Abs(math.Atan2(c[5]-c[3], c[4]-c[2]) - math.Atan2(c[3]-c[1], c[2]-c[0])) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + + if da1 < angleTolerance { + t.LineTo(c[2], c[3]) + t.LineTo(c[4], c[5]) + return + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + t.LineTo(c[2], c[3]) + return + } + } + } + + case d2 > CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: + // Regular case + //----------------- + if (d2+d3)*(d2+d3) <= distanceToleranceSquare*(dx*dx+dy*dy) { + // If the curvature doesn't exceed the distanceTolerance value + // we tend to finish subdivisions. 
+ //---------------------- + if angleTolerance < CurveAngleToleranceEpsilon { + t.LineTo(x23, y23) + return + } + + // Angle & Cusp Condition + //---------------------- + k := math.Atan2(c[5]-c[3], c[4]-c[2]) + da1 := math.Abs(k - math.Atan2(c[3]-c[1], c[2]-c[0])) + da2 := math.Abs(math.Atan2(c[7]-c[5], c[6]-c[4]) - k) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + if da2 >= math.Pi { + da2 = 2*math.Pi - da2 + } + + if da1+da2 < angleTolerance { + // Finally we can stop the recursion + //---------------------- + t.LineTo(x23, y23) + return + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + t.LineTo(c[2], c[3]) + return + } + + if da2 > cuspLimit { + t.LineTo(c[4], c[5]) + return + } + } + } + } + + // Continue subdivision + //---------------------- + c1.adaptiveSegmentRec(t, level+1, distanceToleranceSquare, angleTolerance, cuspLimit) + c2.adaptiveSegmentRec(t, level+1, distanceToleranceSquare, angleTolerance, cuspLimit) + +} + +func (curve *CubicCurveFloat64) AdaptiveSegment(t LineTracer, approximationScale, angleTolerance, cuspLimit float64) { + cuspLimit = computeCuspLimit(cuspLimit) + distanceToleranceSquare := 0.5 / approximationScale + distanceToleranceSquare = distanceToleranceSquare * distanceToleranceSquare + + var curves [CurveRecursionLimit]CubicCurveFloat64 + curves[0] = *curve + i := 0 + // current curve + var c *CubicCurveFloat64 + var c1, c2 CubicCurveFloat64 + var dx, dy, d2, d3, k, x23, y23 float64 + for i >= 0 { + c = &curves[i] + x23, y23 = c.Subdivide(&c1, &c2) + + // Try to approximate the full cubic curve by a single straight line + //------------------ + dx = c[6] - c[0] + dy = c[7] - c[1] + + d2 = math.Abs(((c[2]-c[6])*dy - (c[3]-c[7])*dx)) + d3 = math.Abs(((c[4]-c[6])*dy - (c[5]-c[7])*dx)) + switch { + case i == len(curves)-1: + t.LineTo(c[6], c[7]) + i-- + continue + case d2 <= CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: + // All collinear OR p1==p4 + //---------------------- + k = dx*dx + dy*dy + if k == 0 { + d2 
= squareDistance(c[0], c[1], c[2], c[3]) + d3 = squareDistance(c[6], c[7], c[4], c[5]) + } else { + k = 1 / k + da1 := c[2] - c[0] + da2 := c[3] - c[1] + d2 = k * (da1*dx + da2*dy) + da1 = c[4] - c[0] + da2 = c[5] - c[1] + d3 = k * (da1*dx + da2*dy) + if d2 > 0 && d2 < 1 && d3 > 0 && d3 < 1 { + // Simple collinear case, 1---2---3---4 + // We can leave just two endpoints + i-- + continue + } + if d2 <= 0 { + d2 = squareDistance(c[2], c[3], c[0], c[1]) + } else if d2 >= 1 { + d2 = squareDistance(c[2], c[3], c[6], c[7]) + } else { + d2 = squareDistance(c[2], c[3], c[0]+d2*dx, c[1]+d2*dy) + } + + if d3 <= 0 { + d3 = squareDistance(c[4], c[5], c[0], c[1]) + } else if d3 >= 1 { + d3 = squareDistance(c[4], c[5], c[6], c[7]) + } else { + d3 = squareDistance(c[4], c[5], c[0]+d3*dx, c[1]+d3*dy) + } + } + if d2 > d3 { + if d2 < distanceToleranceSquare { + t.LineTo(c[2], c[3]) + i-- + continue + } + } else { + if d3 < distanceToleranceSquare { + t.LineTo(c[4], c[5]) + i-- + continue + } + } + + case d2 <= CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: + // p1,p2,p4 are collinear, p3 is significant + //---------------------- + if d3*d3 <= distanceToleranceSquare*(dx*dx+dy*dy) { + if angleTolerance < CurveAngleToleranceEpsilon { + t.LineTo(x23, y23) + i-- + continue + } + + // Angle Condition + //---------------------- + da1 := math.Abs(math.Atan2(c[7]-c[5], c[6]-c[4]) - math.Atan2(c[5]-c[3], c[4]-c[2])) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + + if da1 < angleTolerance { + t.LineTo(c[2], c[3]) + t.LineTo(c[4], c[5]) + i-- + continue + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + t.LineTo(c[4], c[5]) + i-- + continue + } + } + } + + case d2 > CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: + // p1,p3,p4 are collinear, p2 is significant + //---------------------- + if d2*d2 <= distanceToleranceSquare*(dx*dx+dy*dy) { + if angleTolerance < CurveAngleToleranceEpsilon { + t.LineTo(x23, y23) + i-- + continue + } + + // Angle Condition + 
//---------------------- + da1 := math.Abs(math.Atan2(c[5]-c[3], c[4]-c[2]) - math.Atan2(c[3]-c[1], c[2]-c[0])) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + + if da1 < angleTolerance { + t.LineTo(c[2], c[3]) + t.LineTo(c[4], c[5]) + i-- + continue + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + t.LineTo(c[2], c[3]) + i-- + continue + } + } + } + + case d2 > CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: + // Regular case + //----------------- + if (d2+d3)*(d2+d3) <= distanceToleranceSquare*(dx*dx+dy*dy) { + // If the curvature doesn't exceed the distanceTolerance value + // we tend to finish subdivisions. + //---------------------- + if angleTolerance < CurveAngleToleranceEpsilon { + t.LineTo(x23, y23) + i-- + continue + } + + // Angle & Cusp Condition + //---------------------- + k := math.Atan2(c[5]-c[3], c[4]-c[2]) + da1 := math.Abs(k - math.Atan2(c[3]-c[1], c[2]-c[0])) + da2 := math.Abs(math.Atan2(c[7]-c[5], c[6]-c[4]) - k) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + if da2 >= math.Pi { + da2 = 2*math.Pi - da2 + } + + if da1+da2 < angleTolerance { + // Finally we can stop the recursion + //---------------------- + t.LineTo(x23, y23) + i-- + continue + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + t.LineTo(c[2], c[3]) + i-- + continue + } + + if da2 > cuspLimit { + t.LineTo(c[4], c[5]) + i-- + continue + } + } + } + } + + // Continue subdivision + //---------------------- + curves[i+1], curves[i] = c1, c2 + i++ + } + t.LineTo(curve[6], curve[7]) +} + +/********************** Ahmad thesis *******************/ + +/************************************************************************************** +* This code is the implementation of the Parabolic Approximation (PA). Although * +* it uses recursive subdivision as a safe net for the failing cases, this is an * +* iterative routine and reduces considerably the number of vertices (point) * +* generation. 
* +**************************************************************************************/ + +func (c *CubicCurveFloat64) ParabolicSegment(t LineTracer, flattening_threshold float64) { + estimatedIFP := c.numberOfInflectionPoints() + if estimatedIFP == 0 { + // If no inflection points then apply PA on the full Bezier segment. + c.doParabolicApproximation(t, flattening_threshold) + return + } + // If one or more inflection point then we will have to subdivide the curve + numOfIfP, t1, t2 := c.findInflectionPoints() + if numOfIfP == 2 { + // Case when 2 inflection points then divide at the smallest one first + var sub1, tmp1, sub2, sub3 CubicCurveFloat64 + c.SubdivideAt(&sub1, &tmp1, t1) + // Now find the second inflection point in the second curve an subdivide + numOfIfP, t1, t2 = tmp1.findInflectionPoints() + if numOfIfP == 2 { + tmp1.SubdivideAt(&sub2, &sub3, t2) + } else if numOfIfP == 1 { + tmp1.SubdivideAt(&sub2, &sub3, t1) + } else { + return + } + // Use PA for first subsegment + sub1.doParabolicApproximation(t, flattening_threshold) + // Use RS for the second (middle) subsegment + sub2.Segment(t, flattening_threshold) + // Drop the last point in the array will be added by the PA in third subsegment + //noOfPoints--; + // Use PA for the third curve + sub3.doParabolicApproximation(t, flattening_threshold) + } else if numOfIfP == 1 { + // Case where there is one inflection point, subdivide once and use PA on + // both subsegments + var sub1, sub2 CubicCurveFloat64 + c.SubdivideAt(&sub1, &sub2, t1) + sub1.doParabolicApproximation(t, flattening_threshold) + //noOfPoints--; + sub2.doParabolicApproximation(t, flattening_threshold) + } else { + // Case where there is no inflection USA PA directly + c.doParabolicApproximation(t, flattening_threshold) + } +} + +// Find the third control point deviation form the axis +func (c *CubicCurveFloat64) thirdControlPointDeviation() float64 { + dx := c[2] - c[0] + dy := c[3] - c[1] + l2 := dx*dx + dy*dy + if l2 == 0 { + return 
0 + } + l := math.Sqrt(l2) + r := (c[3] - c[1]) / l + s := (c[0] - c[2]) / l + u := (c[2]*c[1] - c[0]*c[3]) / l + return math.Abs(r*c[4] + s*c[5] + u) +} + +// Find the number of inflection point +func (c *CubicCurveFloat64) numberOfInflectionPoints() int { + dx21 := (c[2] - c[0]) + dy21 := (c[3] - c[1]) + dx32 := (c[4] - c[2]) + dy32 := (c[5] - c[3]) + dx43 := (c[6] - c[4]) + dy43 := (c[7] - c[5]) + if ((dx21*dy32 - dy21*dx32) * (dx32*dy43 - dy32*dx43)) < 0 { + return 1 // One inflection point + } else if ((dx21*dy32 - dy21*dx32) * (dx21*dy43 - dy21*dx43)) > 0 { + return 0 // No inflection point + } else { + // Most cases no inflection point + b1 := (dx21*dx32 + dy21*dy32) > 0 + b2 := (dx32*dx43 + dy32*dy43) > 0 + if b1 || b2 && !(b1 && b2) { // xor!! + return 0 + } + } + return -1 // cases where there in zero or two inflection points +} + +// This is the main function where all the work is done +func (curve *CubicCurveFloat64) doParabolicApproximation(tracer LineTracer, flattening_threshold float64) { + var c *CubicCurveFloat64 + c = curve + var d, t, dx, dy, d2, d3 float64 + for { + dx = c[6] - c[0] + dy = c[7] - c[1] + + d2 = math.Abs(((c[2]-c[6])*dy - (c[3]-c[7])*dx)) + d3 = math.Abs(((c[4]-c[6])*dy - (c[5]-c[7])*dx)) + + if (d2+d3)*(d2+d3) < flattening_threshold*(dx*dx+dy*dy) { + // If the subsegment deviation satisfy the flatness then store the last + // point and stop + tracer.LineTo(c[6], c[7]) + break + } + // Find the third control point deviation and the t values for subdivision + d = c.thirdControlPointDeviation() + t = 2 * math.Sqrt(flattening_threshold/d/3) + if t > 1 { + // Case where the t value calculated is invalid so using RS + c.Segment(tracer, flattening_threshold) + break + } + // Valid t value to subdivide at that calculated value + var b1, b2 CubicCurveFloat64 + c.SubdivideAt(&b1, &b2, t) + // First subsegment should have its deviation equal to flatness + dx = b1[6] - b1[0] + dy = b1[7] - b1[1] + + d2 = math.Abs(((b1[2]-b1[6])*dy - 
(b1[3]-b1[7])*dx)) + d3 = math.Abs(((b1[4]-b1[6])*dy - (b1[5]-b1[7])*dx)) + + if (d2+d3)*(d2+d3) > flattening_threshold*(dx*dx+dy*dy) { + // if not then use RS to handle any mathematical errors + b1.Segment(tracer, flattening_threshold) + } else { + tracer.LineTo(b1[6], b1[7]) + } + // repeat the process for the left over subsegment. + c = &b2 + } +} + +// Find the actual inflection points and return the number of inflection points found +// if 2 inflection points found, the first one returned will be with smaller t value. +func (curve *CubicCurveFloat64) findInflectionPoints() (int, firstIfp, secondIfp float64) { + // For Cubic Bezier curve with equation P=a*t^3 + b*t^2 + c*t + d + // slope of the curve dP/dt = 3*a*t^2 + 2*b*t + c + // a = (float)(-bez.p1 + 3*bez.p2 - 3*bez.p3 + bez.p4); + // b = (float)(3*bez.p1 - 6*bez.p2 + 3*bez.p3); + // c = (float)(-3*bez.p1 + 3*bez.p2); + ax := (-curve[0] + 3*curve[2] - 3*curve[4] + curve[6]) + bx := (3*curve[0] - 6*curve[2] + 3*curve[4]) + cx := (-3*curve[0] + 3*curve[2]) + ay := (-curve[1] + 3*curve[3] - 3*curve[5] + curve[7]) + by := (3*curve[1] - 6*curve[3] + 3*curve[5]) + cy := (-3*curve[1] + 3*curve[3]) + a := (3 * (ay*bx - ax*by)) + b := (3 * (ay*cx - ax*cy)) + c := (by*cx - bx*cy) + r2 := (b*b - 4*a*c) + firstIfp = 0.0 + secondIfp = 0.0 + if r2 >= 0.0 && a != 0.0 { + r := math.Sqrt(r2) + firstIfp = ((-b + r) / (2 * a)) + secondIfp = ((-b - r) / (2 * a)) + if (firstIfp > 0.0 && firstIfp < 1.0) && (secondIfp > 0.0 && secondIfp < 1.0) { + if firstIfp > secondIfp { + tmp := firstIfp + firstIfp = secondIfp + secondIfp = tmp + } + if secondIfp-firstIfp > 0.00001 { + return 2, firstIfp, secondIfp + } else { + return 1, firstIfp, secondIfp + } + } else if firstIfp > 0.0 && firstIfp < 1.0 { + return 1, firstIfp, secondIfp + } else if secondIfp > 0.0 && secondIfp < 1.0 { + firstIfp = secondIfp + return 1, firstIfp, secondIfp + } + return 0, firstIfp, secondIfp + } + return 0, firstIfp, secondIfp +} diff --git 
a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/curve_test.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/curve_test.go new file mode 100644 index 000000000..5e9eecac0 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/curve_test.go @@ -0,0 +1,262 @@ +package curve + +import ( + "bufio" + "code.google.com/p/draw2d/draw2d/raster" + "fmt" + "image" + "image/color" + "image/draw" + "image/png" + "log" + "os" + "testing" +) + +var ( + flattening_threshold float64 = 0.5 + testsCubicFloat64 = []CubicCurveFloat64{ + CubicCurveFloat64{100, 100, 200, 100, 100, 200, 200, 200}, + CubicCurveFloat64{100, 100, 300, 200, 200, 200, 300, 100}, + CubicCurveFloat64{100, 100, 0, 300, 200, 0, 300, 300}, + CubicCurveFloat64{150, 290, 10, 10, 290, 10, 150, 290}, + CubicCurveFloat64{10, 290, 10, 10, 290, 10, 290, 290}, + CubicCurveFloat64{100, 290, 290, 10, 10, 10, 200, 290}, + } + testsQuadFloat64 = []QuadCurveFloat64{ + QuadCurveFloat64{100, 100, 200, 100, 200, 200}, + QuadCurveFloat64{100, 100, 290, 200, 290, 100}, + QuadCurveFloat64{100, 100, 0, 290, 200, 290}, + QuadCurveFloat64{150, 290, 10, 10, 290, 290}, + QuadCurveFloat64{10, 290, 10, 10, 290, 290}, + QuadCurveFloat64{100, 290, 290, 10, 120, 290}, + } +) + +type Path struct { + points []float64 +} + +func (p *Path) LineTo(x, y float64) { + if len(p.points)+2 > cap(p.points) { + points := make([]float64, len(p.points)+2, len(p.points)+32) + copy(points, p.points) + p.points = points + } else { + p.points = p.points[0 : len(p.points)+2] + } + p.points[len(p.points)-2] = x + p.points[len(p.points)-1] = y +} + +func init() { + f, err := os.Create("_test.html") + if err != nil { + log.Println(err) + os.Exit(1) + } + defer f.Close() + log.Printf("Create html viewer") + f.Write([]byte("")) + for i := 0; i < len(testsCubicFloat64); i++ { + f.Write([]byte(fmt.Sprintf("
\n\n\n\n\n
\n", i, i, i, i, i))) + } + for i := 0; i < len(testsQuadFloat64); i++ { + f.Write([]byte(fmt.Sprintf("
\n
\n", i))) + } + f.Write([]byte("")) + +} + +func savepng(filePath string, m image.Image) { + f, err := os.Create(filePath) + if err != nil { + log.Println(err) + os.Exit(1) + } + defer f.Close() + b := bufio.NewWriter(f) + err = png.Encode(b, m) + if err != nil { + log.Println(err) + os.Exit(1) + } + err = b.Flush() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +func drawPoints(img draw.Image, c color.Color, s ...float64) image.Image { + /*for i := 0; i < len(s); i += 2 { + x, y := int(s[i]+0.5), int(s[i+1]+0.5) + img.Set(x, y, c) + img.Set(x, y+1, c) + img.Set(x, y-1, c) + img.Set(x+1, y, c) + img.Set(x+1, y+1, c) + img.Set(x+1, y-1, c) + img.Set(x-1, y, c) + img.Set(x-1, y+1, c) + img.Set(x-1, y-1, c) + + }*/ + return img +} + +func TestCubicCurveRec(t *testing.T) { + for i, curve := range testsCubicFloat64 { + var p Path + p.LineTo(curve[0], curve[1]) + curve.SegmentRec(&p, flattening_threshold) + img := image.NewNRGBA(image.Rect(0, 0, 300, 300)) + raster.PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, curve[:]...) + raster.PolylineBresenham(img, image.Black, p.points...) + //drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...) + drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.points...) + savepng(fmt.Sprintf("_testRec%d.png", i), img) + log.Printf("Num of points: %d\n", len(p.points)) + } + fmt.Println() +} + +func TestCubicCurve(t *testing.T) { + for i, curve := range testsCubicFloat64 { + var p Path + p.LineTo(curve[0], curve[1]) + curve.Segment(&p, flattening_threshold) + img := image.NewNRGBA(image.Rect(0, 0, 300, 300)) + raster.PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, curve[:]...) + raster.PolylineBresenham(img, image.Black, p.points...) + //drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...) + drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.points...) 
+ savepng(fmt.Sprintf("_test%d.png", i), img) + log.Printf("Num of points: %d\n", len(p.points)) + } + fmt.Println() +} + +func TestCubicCurveAdaptiveRec(t *testing.T) { + for i, curve := range testsCubicFloat64 { + var p Path + p.LineTo(curve[0], curve[1]) + curve.AdaptiveSegmentRec(&p, 1, 0, 0) + img := image.NewNRGBA(image.Rect(0, 0, 300, 300)) + raster.PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, curve[:]...) + raster.PolylineBresenham(img, image.Black, p.points...) + //drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...) + drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.points...) + savepng(fmt.Sprintf("_testAdaptiveRec%d.png", i), img) + log.Printf("Num of points: %d\n", len(p.points)) + } + fmt.Println() +} + +func TestCubicCurveAdaptive(t *testing.T) { + for i, curve := range testsCubicFloat64 { + var p Path + p.LineTo(curve[0], curve[1]) + curve.AdaptiveSegment(&p, 1, 0, 0) + img := image.NewNRGBA(image.Rect(0, 0, 300, 300)) + raster.PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, curve[:]...) + raster.PolylineBresenham(img, image.Black, p.points...) + //drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...) + drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.points...) + savepng(fmt.Sprintf("_testAdaptive%d.png", i), img) + log.Printf("Num of points: %d\n", len(p.points)) + } + fmt.Println() +} + +func TestCubicCurveParabolic(t *testing.T) { + for i, curve := range testsCubicFloat64 { + var p Path + p.LineTo(curve[0], curve[1]) + curve.ParabolicSegment(&p, flattening_threshold) + img := image.NewNRGBA(image.Rect(0, 0, 300, 300)) + raster.PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, curve[:]...) + raster.PolylineBresenham(img, image.Black, p.points...) + //drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...) + drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.points...) 
+ savepng(fmt.Sprintf("_testParabolic%d.png", i), img) + log.Printf("Num of points: %d\n", len(p.points)) + } + fmt.Println() +} + +func TestQuadCurve(t *testing.T) { + for i, curve := range testsQuadFloat64 { + var p Path + p.LineTo(curve[0], curve[1]) + curve.Segment(&p, flattening_threshold) + img := image.NewNRGBA(image.Rect(0, 0, 300, 300)) + raster.PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, curve[:]...) + raster.PolylineBresenham(img, image.Black, p.points...) + //drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...) + drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.points...) + savepng(fmt.Sprintf("_testQuad%d.png", i), img) + log.Printf("Num of points: %d\n", len(p.points)) + } + fmt.Println() +} + +func BenchmarkCubicCurveRec(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, curve := range testsCubicFloat64 { + p := Path{make([]float64, 0, 32)} + p.LineTo(curve[0], curve[1]) + curve.SegmentRec(&p, flattening_threshold) + } + } +} + +func BenchmarkCubicCurve(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, curve := range testsCubicFloat64 { + p := Path{make([]float64, 0, 32)} + p.LineTo(curve[0], curve[1]) + curve.Segment(&p, flattening_threshold) + } + } +} + +func BenchmarkCubicCurveAdaptiveRec(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, curve := range testsCubicFloat64 { + p := Path{make([]float64, 0, 32)} + p.LineTo(curve[0], curve[1]) + curve.AdaptiveSegmentRec(&p, 1, 0, 0) + } + } +} + +func BenchmarkCubicCurveAdaptive(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, curve := range testsCubicFloat64 { + p := Path{make([]float64, 0, 32)} + p.LineTo(curve[0], curve[1]) + curve.AdaptiveSegment(&p, 1, 0, 0) + } + } +} + +func BenchmarkCubicCurveParabolic(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, curve := range testsCubicFloat64 { + p := Path{make([]float64, 0, 32)} + p.LineTo(curve[0], curve[1]) + curve.ParabolicSegment(&p, flattening_threshold) + } + } +} + +func BenchmarkQuadCurve(b *testing.B) { + 
for i := 0; i < b.N; i++ { + for _, curve := range testsQuadFloat64 { + p := Path{make([]float64, 0, 32)} + p.LineTo(curve[0], curve[1]) + curve.Segment(&p, flattening_threshold) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/quad_float64.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/quad_float64.go new file mode 100644 index 000000000..bd72affbb --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curve/quad_float64.go @@ -0,0 +1,51 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 17/05/2011 by Laurent Le Goff +package curve + +import ( + "math" +) + +//X1, Y1, X2, Y2, X3, Y3 float64 +type QuadCurveFloat64 [6]float64 + +func (c *QuadCurveFloat64) Subdivide(c1, c2 *QuadCurveFloat64) { + // Calculate all the mid-points of the line segments + //---------------------- + c1[0], c1[1] = c[0], c[1] + c2[4], c2[5] = c[4], c[5] + c1[2] = (c[0] + c[2]) / 2 + c1[3] = (c[1] + c[3]) / 2 + c2[2] = (c[2] + c[4]) / 2 + c2[3] = (c[3] + c[5]) / 2 + c1[4] = (c1[2] + c2[2]) / 2 + c1[5] = (c1[3] + c2[3]) / 2 + c2[0], c2[1] = c1[4], c1[5] + return +} + +func (curve *QuadCurveFloat64) Segment(t LineTracer, flattening_threshold float64) { + var curves [CurveRecursionLimit]QuadCurveFloat64 + curves[0] = *curve + i := 0 + // current curve + var c *QuadCurveFloat64 + var dx, dy, d float64 + + for i >= 0 { + c = &curves[i] + dx = c[4] - c[0] + dy = c[5] - c[1] + + d = math.Abs(((c[2]-c[4])*dy - (c[3]-c[5])*dx)) + + if (d*d) < flattening_threshold*(dx*dx+dy*dy) || i == len(curves)-1 { + t.LineTo(c[4], c[5]) + i-- + } else { + // second half of bezier go lower onto the stack + c.Subdivide(&curves[i+1], &curves[i]) + i++ + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curves.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curves.go new file mode 100644 index 000000000..4623cd4dc --- /dev/null +++ 
b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/curves.go @@ -0,0 +1,336 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "math" +) + +var ( + CurveRecursionLimit = 32 + CurveCollinearityEpsilon = 1e-30 + CurveAngleToleranceEpsilon = 0.01 +) + +/* + The function has the following parameters: + approximationScale : + Eventually determines the approximation accuracy. In practice we need to transform points from the World coordinate system to the Screen one. + It always has some scaling coefficient. + The curves are usually processed in the World coordinates, while the approximation accuracy should be eventually in pixels. + Usually it looks as follows: + curved.approximationScale(transform.scale()); + where transform is the affine matrix that includes all the transformations, including viewport and zoom. + angleTolerance : + You set it in radians. + The less this value is the more accurate will be the approximation at sharp turns. + But 0 means that we don't consider angle conditions at all. + cuspLimit : + An angle in radians. + If 0, only the real cusps will have bevel cuts. + If more than 0, it will restrict the sharpness. + The more this value is the less sharp turns will be cut. + Typically it should not exceed 10-15 degrees. 
+*/ +func cubicBezier(v VertexConverter, x1, y1, x2, y2, x3, y3, x4, y4, approximationScale, angleTolerance, cuspLimit float64) { + cuspLimit = computeCuspLimit(cuspLimit) + distanceToleranceSquare := 0.5 / approximationScale + distanceToleranceSquare = distanceToleranceSquare * distanceToleranceSquare + recursiveCubicBezier(v, x1, y1, x2, y2, x3, y3, x4, y4, 0, distanceToleranceSquare, angleTolerance, cuspLimit) +} + +/* + * see cubicBezier comments for approximationScale and angleTolerance definition + */ +func quadraticBezier(v VertexConverter, x1, y1, x2, y2, x3, y3, approximationScale, angleTolerance float64) { + distanceToleranceSquare := 0.5 / approximationScale + distanceToleranceSquare = distanceToleranceSquare * distanceToleranceSquare + + recursiveQuadraticBezierBezier(v, x1, y1, x2, y2, x3, y3, 0, distanceToleranceSquare, angleTolerance) +} + +func computeCuspLimit(v float64) (r float64) { + if v == 0.0 { + r = 0.0 + } else { + r = math.Pi - v + } + return +} + +/** + * http://www.antigrain.com/research/adaptive_bezier/index.html + */ +func recursiveQuadraticBezierBezier(v VertexConverter, x1, y1, x2, y2, x3, y3 float64, level int, distanceToleranceSquare, angleTolerance float64) { + if level > CurveRecursionLimit { + return + } + + // Calculate all the mid-points of the line segments + //---------------------- + x12 := (x1 + x2) / 2 + y12 := (y1 + y2) / 2 + x23 := (x2 + x3) / 2 + y23 := (y2 + y3) / 2 + x123 := (x12 + x23) / 2 + y123 := (y12 + y23) / 2 + + dx := x3 - x1 + dy := y3 - y1 + d := math.Abs(((x2-x3)*dy - (y2-y3)*dx)) + + if d > CurveCollinearityEpsilon { + // Regular case + //----------------- + if d*d <= distanceToleranceSquare*(dx*dx+dy*dy) { + // If the curvature doesn't exceed the distanceTolerance value + // we tend to finish subdivisions. 
+ //---------------------- + if angleTolerance < CurveAngleToleranceEpsilon { + v.Vertex(x123, y123) + return + } + + // Angle & Cusp Condition + //---------------------- + da := math.Abs(math.Atan2(y3-y2, x3-x2) - math.Atan2(y2-y1, x2-x1)) + if da >= math.Pi { + da = 2*math.Pi - da + } + + if da < angleTolerance { + // Finally we can stop the recursion + //---------------------- + v.Vertex(x123, y123) + return + } + } + } else { + // Collinear case + //------------------ + da := dx*dx + dy*dy + if da == 0 { + d = squareDistance(x1, y1, x2, y2) + } else { + d = ((x2-x1)*dx + (y2-y1)*dy) / da + if d > 0 && d < 1 { + // Simple collinear case, 1---2---3 + // We can leave just two endpoints + return + } + if d <= 0 { + d = squareDistance(x2, y2, x1, y1) + } else if d >= 1 { + d = squareDistance(x2, y2, x3, y3) + } else { + d = squareDistance(x2, y2, x1+d*dx, y1+d*dy) + } + } + if d < distanceToleranceSquare { + v.Vertex(x2, y2) + return + } + } + + // Continue subdivision + //---------------------- + recursiveQuadraticBezierBezier(v, x1, y1, x12, y12, x123, y123, level+1, distanceToleranceSquare, angleTolerance) + recursiveQuadraticBezierBezier(v, x123, y123, x23, y23, x3, y3, level+1, distanceToleranceSquare, angleTolerance) +} + +/** + * http://www.antigrain.com/research/adaptive_bezier/index.html + */ +func recursiveCubicBezier(v VertexConverter, x1, y1, x2, y2, x3, y3, x4, y4 float64, level int, distanceToleranceSquare, angleTolerance, cuspLimit float64) { + if level > CurveRecursionLimit { + return + } + + // Calculate all the mid-points of the line segments + //---------------------- + x12 := (x1 + x2) / 2 + y12 := (y1 + y2) / 2 + x23 := (x2 + x3) / 2 + y23 := (y2 + y3) / 2 + x34 := (x3 + x4) / 2 + y34 := (y3 + y4) / 2 + x123 := (x12 + x23) / 2 + y123 := (y12 + y23) / 2 + x234 := (x23 + x34) / 2 + y234 := (y23 + y34) / 2 + x1234 := (x123 + x234) / 2 + y1234 := (y123 + y234) / 2 + + // Try to approximate the full cubic curve by a single straight line + 
//------------------ + dx := x4 - x1 + dy := y4 - y1 + + d2 := math.Abs(((x2-x4)*dy - (y2-y4)*dx)) + d3 := math.Abs(((x3-x4)*dy - (y3-y4)*dx)) + + switch { + case d2 <= CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: + // All collinear OR p1==p4 + //---------------------- + k := dx*dx + dy*dy + if k == 0 { + d2 = squareDistance(x1, y1, x2, y2) + d3 = squareDistance(x4, y4, x3, y3) + } else { + k = 1 / k + da1 := x2 - x1 + da2 := y2 - y1 + d2 = k * (da1*dx + da2*dy) + da1 = x3 - x1 + da2 = y3 - y1 + d3 = k * (da1*dx + da2*dy) + if d2 > 0 && d2 < 1 && d3 > 0 && d3 < 1 { + // Simple collinear case, 1---2---3---4 + // We can leave just two endpoints + return + } + if d2 <= 0 { + d2 = squareDistance(x2, y2, x1, y1) + } else if d2 >= 1 { + d2 = squareDistance(x2, y2, x4, y4) + } else { + d2 = squareDistance(x2, y2, x1+d2*dx, y1+d2*dy) + } + + if d3 <= 0 { + d3 = squareDistance(x3, y3, x1, y1) + } else if d3 >= 1 { + d3 = squareDistance(x3, y3, x4, y4) + } else { + d3 = squareDistance(x3, y3, x1+d3*dx, y1+d3*dy) + } + } + if d2 > d3 { + if d2 < distanceToleranceSquare { + v.Vertex(x2, y2) + return + } + } else { + if d3 < distanceToleranceSquare { + v.Vertex(x3, y3) + return + } + } + break + + case d2 <= CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: + // p1,p2,p4 are collinear, p3 is significant + //---------------------- + if d3*d3 <= distanceToleranceSquare*(dx*dx+dy*dy) { + if angleTolerance < CurveAngleToleranceEpsilon { + v.Vertex(x23, y23) + return + } + + // Angle Condition + //---------------------- + da1 := math.Abs(math.Atan2(y4-y3, x4-x3) - math.Atan2(y3-y2, x3-x2)) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + + if da1 < angleTolerance { + v.Vertex(x2, y2) + v.Vertex(x3, y3) + return + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + v.Vertex(x3, y3) + return + } + } + } + break + + case d2 > CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: + // p1,p3,p4 are collinear, p2 is significant + //---------------------- 
+ if d2*d2 <= distanceToleranceSquare*(dx*dx+dy*dy) { + if angleTolerance < CurveAngleToleranceEpsilon { + v.Vertex(x23, y23) + return + } + + // Angle Condition + //---------------------- + da1 := math.Abs(math.Atan2(y3-y2, x3-x2) - math.Atan2(y2-y1, x2-x1)) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + + if da1 < angleTolerance { + v.Vertex(x2, y2) + v.Vertex(x3, y3) + return + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + v.Vertex(x2, y2) + return + } + } + } + break + + case d2 > CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: + // Regular case + //----------------- + if (d2+d3)*(d2+d3) <= distanceToleranceSquare*(dx*dx+dy*dy) { + // If the curvature doesn't exceed the distanceTolerance value + // we tend to finish subdivisions. + //---------------------- + if angleTolerance < CurveAngleToleranceEpsilon { + v.Vertex(x23, y23) + return + } + + // Angle & Cusp Condition + //---------------------- + k := math.Atan2(y3-y2, x3-x2) + da1 := math.Abs(k - math.Atan2(y2-y1, x2-x1)) + da2 := math.Abs(math.Atan2(y4-y3, x4-x3) - k) + if da1 >= math.Pi { + da1 = 2*math.Pi - da1 + } + if da2 >= math.Pi { + da2 = 2*math.Pi - da2 + } + + if da1+da2 < angleTolerance { + // Finally we can stop the recursion + //---------------------- + v.Vertex(x23, y23) + return + } + + if cuspLimit != 0.0 { + if da1 > cuspLimit { + v.Vertex(x2, y2) + return + } + + if da2 > cuspLimit { + v.Vertex(x3, y3) + return + } + } + } + break + } + + // Continue subdivision + //---------------------- + recursiveCubicBezier(v, x1, y1, x12, y12, x123, y123, x1234, y1234, level+1, distanceToleranceSquare, angleTolerance, cuspLimit) + recursiveCubicBezier(v, x1234, y1234, x234, y234, x34, y34, x4, y4, level+1, distanceToleranceSquare, angleTolerance, cuspLimit) + +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/dasher.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/dasher.go new file mode 100644 index 000000000..521029992 --- /dev/null +++ 
b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/dasher.go @@ -0,0 +1,90 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +package draw2d + +type DashVertexConverter struct { + command VertexCommand + next VertexConverter + x, y, distance float64 + dash []float64 + currentDash int + dashOffset float64 +} + +func NewDashConverter(dash []float64, dashOffset float64, converter VertexConverter) *DashVertexConverter { + var dasher DashVertexConverter + dasher.dash = dash + dasher.currentDash = 0 + dasher.dashOffset = dashOffset + dasher.next = converter + return &dasher +} + +func (dasher *DashVertexConverter) NextCommand(cmd VertexCommand) { + dasher.command = cmd + if dasher.command == VertexStopCommand { + dasher.next.NextCommand(VertexStopCommand) + } +} + +func (dasher *DashVertexConverter) Vertex(x, y float64) { + switch dasher.command { + case VertexStartCommand: + dasher.start(x, y) + default: + dasher.lineTo(x, y) + } + dasher.command = VertexNoCommand +} + +func (dasher *DashVertexConverter) start(x, y float64) { + dasher.next.NextCommand(VertexStartCommand) + dasher.next.Vertex(x, y) + dasher.x, dasher.y = x, y + dasher.distance = dasher.dashOffset + dasher.currentDash = 0 +} + +func (dasher *DashVertexConverter) lineTo(x, y float64) { + rest := dasher.dash[dasher.currentDash] - dasher.distance + for rest < 0 { + dasher.distance = dasher.distance - dasher.dash[dasher.currentDash] + dasher.currentDash = (dasher.currentDash + 1) % len(dasher.dash) + rest = dasher.dash[dasher.currentDash] - dasher.distance + } + d := distance(dasher.x, dasher.y, x, y) + for d >= rest { + k := rest / d + lx := dasher.x + k*(x-dasher.x) + ly := dasher.y + k*(y-dasher.y) + if dasher.currentDash%2 == 0 { + // line + dasher.next.Vertex(lx, ly) + } else { + // gap + dasher.next.NextCommand(VertexStopCommand) + dasher.next.NextCommand(VertexStartCommand) + dasher.next.Vertex(lx, ly) + } + d = d - rest + dasher.x, 
dasher.y = lx, ly + dasher.currentDash = (dasher.currentDash + 1) % len(dasher.dash) + rest = dasher.dash[dasher.currentDash] + } + dasher.distance = d + if dasher.currentDash%2 == 0 { + // line + dasher.next.Vertex(x, y) + } else { + // gap + dasher.next.NextCommand(VertexStopCommand) + dasher.next.NextCommand(VertexStartCommand) + dasher.next.Vertex(x, y) + } + if dasher.distance >= dasher.dash[dasher.currentDash] { + dasher.distance = dasher.distance - dasher.dash[dasher.currentDash] + dasher.currentDash = (dasher.currentDash + 1) % len(dasher.dash) + } + dasher.x, dasher.y = x, y +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/demux_converter.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/demux_converter.go new file mode 100644 index 000000000..b5c871d2c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/demux_converter.go @@ -0,0 +1,23 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +package draw2d + +type DemuxConverter struct { + converters []VertexConverter +} + +func NewDemuxConverter(converters ...VertexConverter) *DemuxConverter { + return &DemuxConverter{converters} +} + +func (dc *DemuxConverter) NextCommand(cmd VertexCommand) { + for _, converter := range dc.converters { + converter.NextCommand(cmd) + } +} +func (dc *DemuxConverter) Vertex(x, y float64) { + for _, converter := range dc.converters { + converter.Vertex(x, y) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/doc.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/doc.go new file mode 100644 index 000000000..3baeffb4d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/doc.go @@ -0,0 +1,5 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +// The package draw2d provide a Graphic Context that can draw vectorial figure on surface. 
+package draw2d diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/font.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/font.go new file mode 100644 index 000000000..eb0b5325c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/font.go @@ -0,0 +1,97 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +package draw2d + +import ( + "code.google.com/p/freetype-go/freetype/truetype" + "io/ioutil" + "log" + "path" +) + +var ( + fontFolder = "../resource/font/" + fonts = make(map[string]*truetype.Font) +) + +type FontStyle byte + +const ( + FontStyleNormal FontStyle = iota + FontStyleBold + FontStyleItalic +) + +type FontFamily byte + +const ( + FontFamilySans FontFamily = iota + FontFamilySerif + FontFamilyMono +) + +type FontData struct { + Name string + Family FontFamily + Style FontStyle +} + +func fontFileName(fontData FontData) string { + fontFileName := fontData.Name + switch fontData.Family { + case FontFamilySans: + fontFileName += "s" + case FontFamilySerif: + fontFileName += "r" + case FontFamilyMono: + fontFileName += "m" + } + if fontData.Style&FontStyleBold != 0 { + fontFileName += "b" + } else { + fontFileName += "r" + } + + if fontData.Style&FontStyleItalic != 0 { + fontFileName += "i" + } + fontFileName += ".ttf" + return fontFileName +} + +func RegisterFont(fontData FontData, font *truetype.Font) { + fonts[fontFileName(fontData)] = font +} + +func GetFont(fontData FontData) *truetype.Font { + fontFileName := fontFileName(fontData) + font := fonts[fontFileName] + if font != nil { + return font + } + fonts[fontFileName] = loadFont(fontFileName) + return fonts[fontFileName] +} + +func GetFontFolder() string { + return fontFolder +} + +func SetFontFolder(folder string) { + fontFolder = folder +} + +func loadFont(fontFileName string) *truetype.Font { + fontBytes, err := ioutil.ReadFile(path.Join(fontFolder, fontFileName)) + if err != nil { + 
log.Println(err) + return nil + } + font, err := truetype.Parse(fontBytes) + if err != nil { + log.Println(err) + return nil + } + return font +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/gc.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/gc.go new file mode 100644 index 000000000..66dc5088f --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/gc.go @@ -0,0 +1,55 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "image" + "image/color" +) + +type FillRule int + +const ( + FillRuleEvenOdd FillRule = iota + FillRuleWinding +) + +type GraphicContext interface { + Path + // Create a new path + BeginPath() + GetMatrixTransform() MatrixTransform + SetMatrixTransform(tr MatrixTransform) + ComposeMatrixTransform(tr MatrixTransform) + Rotate(angle float64) + Translate(tx, ty float64) + Scale(sx, sy float64) + SetStrokeColor(c color.Color) + SetFillColor(c color.Color) + SetFillRule(f FillRule) + SetLineWidth(lineWidth float64) + SetLineCap(cap Cap) + SetLineJoin(join Join) + SetLineDash(dash []float64, dashOffset float64) + SetFontSize(fontSize float64) + GetFontSize() float64 + SetFontData(fontData FontData) + GetFontData() FontData + DrawImage(image image.Image) + Save() + Restore() + Clear() + ClearRect(x1, y1, x2, y2 int) + SetDPI(dpi int) + GetDPI() int + GetStringBounds(s string) (left, top, right, bottom float64) + CreateStringPath(text string, x, y float64) (cursor float64) + FillString(text string) (cursor float64) + FillStringAt(text string, x, y float64) (cursor float64) + StrokeString(text string) (cursor float64) + StrokeStringAt(text string, x, y float64) (cursor float64) + Stroke(paths ...*PathStorage) + Fill(paths ...*PathStorage) + FillStroke(paths ...*PathStorage) +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/image.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/image.go new file 
mode 100644 index 000000000..9f91bc71f --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/image.go @@ -0,0 +1,359 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "code.google.com/p/freetype-go/freetype/raster" + "code.google.com/p/freetype-go/freetype/truetype" + "errors" + "image" + "image/color" + "image/draw" + "log" + "math" +) + +type Painter interface { + raster.Painter + SetColor(color color.Color) +} + +var ( + defaultFontData = FontData{"luxi", FontFamilySans, FontStyleNormal} +) + +type ImageGraphicContext struct { + *StackGraphicContext + img draw.Image + painter Painter + fillRasterizer *raster.Rasterizer + strokeRasterizer *raster.Rasterizer + glyphBuf *truetype.GlyphBuf + DPI int +} + +/** + * Create a new Graphic context from an image + */ +func NewGraphicContext(img draw.Image) *ImageGraphicContext { + var painter Painter + switch selectImage := img.(type) { + case *image.RGBA: + painter = raster.NewRGBAPainter(selectImage) + default: + panic("Image type not supported") + } + return NewGraphicContextWithPainter(img, painter) +} + +// Create a new Graphic context from an image and a Painter (see Freetype-go) +func NewGraphicContextWithPainter(img draw.Image, painter Painter) *ImageGraphicContext { + width, height := img.Bounds().Dx(), img.Bounds().Dy() + dpi := 92 + gc := &ImageGraphicContext{ + NewStackGraphicContext(), + img, + painter, + raster.NewRasterizer(width, height), + raster.NewRasterizer(width, height), + truetype.NewGlyphBuf(), + dpi, + } + return gc +} + +func (gc *ImageGraphicContext) GetDPI() int { + return gc.DPI +} + +func (gc *ImageGraphicContext) Clear() { + width, height := gc.img.Bounds().Dx(), gc.img.Bounds().Dy() + gc.ClearRect(0, 0, width, height) +} + +func (gc *ImageGraphicContext) ClearRect(x1, y1, x2, y2 int) { + imageColor := image.NewUniform(gc.Current.FillColor) + draw.Draw(gc.img, image.Rect(x1, y1, x2, 
y2), imageColor, image.ZP, draw.Over) +} + +func (gc *ImageGraphicContext) DrawImage(img image.Image) { + DrawImage(img, gc.img, gc.Current.Tr, draw.Over, BilinearFilter) +} + +func (gc *ImageGraphicContext) FillString(text string) (cursor float64) { + return gc.FillStringAt(text, 0, 0) +} + +func (gc *ImageGraphicContext) FillStringAt(text string, x, y float64) (cursor float64) { + width := gc.CreateStringPath(text, x, y) + gc.Fill() + return width +} + +func (gc *ImageGraphicContext) StrokeString(text string) (cursor float64) { + return gc.StrokeStringAt(text, 0, 0) +} + +func (gc *ImageGraphicContext) StrokeStringAt(text string, x, y float64) (cursor float64) { + width := gc.CreateStringPath(text, x, y) + gc.Stroke() + return width +} + +func (gc *ImageGraphicContext) loadCurrentFont() (*truetype.Font, error) { + font := GetFont(gc.Current.FontData) + if font == nil { + font = GetFont(defaultFontData) + } + if font == nil { + return nil, errors.New("No font set, and no default font available.") + } + gc.SetFont(font) + gc.SetFontSize(gc.Current.FontSize) + return font, nil +} + +func fUnitsToFloat64(x int32) float64 { + scaled := x << 2 + return float64(scaled/256) + float64(scaled%256)/256.0 +} + +// p is a truetype.Point measured in FUnits and positive Y going upwards. +// The returned value is the same thing measured in floating point and positive Y +// going downwards. +func pointToF64Point(p truetype.Point) (x, y float64) { + return fUnitsToFloat64(p.X), -fUnitsToFloat64(p.Y) +} + +// drawContour draws the given closed contour at the given sub-pixel offset. 
+func (gc *ImageGraphicContext) drawContour(ps []truetype.Point, dx, dy float64) { + if len(ps) == 0 { + return + } + startX, startY := pointToF64Point(ps[0]) + gc.MoveTo(startX+dx, startY+dy) + q0X, q0Y, on0 := startX, startY, true + for _, p := range ps[1:] { + qX, qY := pointToF64Point(p) + on := p.Flags&0x01 != 0 + if on { + if on0 { + gc.LineTo(qX+dx, qY+dy) + } else { + gc.QuadCurveTo(q0X+dx, q0Y+dy, qX+dx, qY+dy) + } + } else { + if on0 { + // No-op. + } else { + midX := (q0X + qX) / 2 + midY := (q0Y + qY) / 2 + gc.QuadCurveTo(q0X+dx, q0Y+dy, midX+dx, midY+dy) + } + } + q0X, q0Y, on0 = qX, qY, on + } + // Close the curve. + if on0 { + gc.LineTo(startX+dx, startY+dy) + } else { + gc.QuadCurveTo(q0X+dx, q0Y+dy, startX+dx, startY+dy) + } +} + +func (gc *ImageGraphicContext) drawGlyph(glyph truetype.Index, dx, dy float64) error { + if err := gc.glyphBuf.Load(gc.Current.font, gc.Current.scale, glyph, truetype.NoHinting); err != nil { + return err + } + e0 := 0 + for _, e1 := range gc.glyphBuf.End { + gc.drawContour(gc.glyphBuf.Point[e0:e1], dx, dy) + e0 = e1 + } + return nil +} + +// CreateStringPath creates a path from the string s at x, y, and returns the string width. +// The text is placed so that the left edge of the em square of the first character of s +// and the baseline intersect at x, y. The majority of the affected pixels will be +// above and to the right of the point, but some may be below or to the left. +// For example, drawing a string that starts with a 'J' in an italic font may +// affect pixels below and left of the point. 
+func (gc *ImageGraphicContext) CreateStringPath(s string, x, y float64) float64 { + font, err := gc.loadCurrentFont() + if err != nil { + log.Println(err) + return 0.0 + } + startx := x + prev, hasPrev := truetype.Index(0), false + for _, rune := range s { + index := font.Index(rune) + if hasPrev { + x += fUnitsToFloat64(font.Kerning(gc.Current.scale, prev, index)) + } + err := gc.drawGlyph(index, x, y) + if err != nil { + log.Println(err) + return startx - x + } + x += fUnitsToFloat64(font.HMetric(gc.Current.scale, index).AdvanceWidth) + prev, hasPrev = index, true + } + return x - startx +} + +// GetStringBounds returns the approximate pixel bounds of the string s at x, y. +// The the left edge of the em square of the first character of s +// and the baseline intersect at 0, 0 in the returned coordinates. +// Therefore the top and left coordinates may well be negative. +func (gc *ImageGraphicContext) GetStringBounds(s string) (left, top, right, bottom float64) { + font, err := gc.loadCurrentFont() + if err != nil { + log.Println(err) + return 0, 0, 0, 0 + } + top, left, bottom, right = 10e6, 10e6, -10e6, -10e6 + cursor := 0.0 + prev, hasPrev := truetype.Index(0), false + for _, rune := range s { + index := font.Index(rune) + if hasPrev { + cursor += fUnitsToFloat64(font.Kerning(gc.Current.scale, prev, index)) + } + if err := gc.glyphBuf.Load(gc.Current.font, gc.Current.scale, index, truetype.NoHinting); err != nil { + log.Println(err) + return 0, 0, 0, 0 + } + e0 := 0 + for _, e1 := range gc.glyphBuf.End { + ps := gc.glyphBuf.Point[e0:e1] + for _, p := range ps { + x, y := pointToF64Point(p) + top = math.Min(top, y) + bottom = math.Max(bottom, y) + left = math.Min(left, x+cursor) + right = math.Max(right, x+cursor) + } + } + cursor += fUnitsToFloat64(font.HMetric(gc.Current.scale, index).AdvanceWidth) + prev, hasPrev = index, true + } + return left, top, right, bottom +} + +// recalc recalculates scale and bounds values from the font size, screen +// resolution 
and font metrics, and invalidates the glyph cache. +func (gc *ImageGraphicContext) recalc() { + gc.Current.scale = int32(gc.Current.FontSize * float64(gc.DPI) * (64.0 / 72.0)) +} + +// SetDPI sets the screen resolution in dots per inch. +func (gc *ImageGraphicContext) SetDPI(dpi int) { + gc.DPI = dpi + gc.recalc() +} + +// SetFont sets the font used to draw text. +func (gc *ImageGraphicContext) SetFont(font *truetype.Font) { + gc.Current.font = font +} + +// SetFontSize sets the font size in points (as in ``a 12 point font''). +func (gc *ImageGraphicContext) SetFontSize(fontSize float64) { + gc.Current.FontSize = fontSize + gc.recalc() +} + +func (gc *ImageGraphicContext) paint(rasterizer *raster.Rasterizer, color color.Color) { + gc.painter.SetColor(color) + rasterizer.Rasterize(gc.painter) + rasterizer.Clear() + gc.Current.Path.Clear() +} + +/**** second method ****/ +func (gc *ImageGraphicContext) Stroke(paths ...*PathStorage) { + paths = append(paths, gc.Current.Path) + gc.strokeRasterizer.UseNonZeroWinding = true + + stroker := NewLineStroker(gc.Current.Cap, gc.Current.Join, NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.strokeRasterizer))) + stroker.HalfLineWidth = gc.Current.LineWidth / 2 + var pathConverter *PathConverter + if gc.Current.Dash != nil && len(gc.Current.Dash) > 0 { + dasher := NewDashConverter(gc.Current.Dash, gc.Current.DashOffset, stroker) + pathConverter = NewPathConverter(dasher) + } else { + pathConverter = NewPathConverter(stroker) + } + pathConverter.ApproximationScale = gc.Current.Tr.GetScale() + pathConverter.Convert(paths...) 
+ + gc.paint(gc.strokeRasterizer, gc.Current.StrokeColor) +} + +/**** second method ****/ +func (gc *ImageGraphicContext) Fill(paths ...*PathStorage) { + paths = append(paths, gc.Current.Path) + gc.fillRasterizer.UseNonZeroWinding = gc.Current.FillRule.UseNonZeroWinding() + + /**** first method ****/ + pathConverter := NewPathConverter(NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.fillRasterizer))) + pathConverter.ApproximationScale = gc.Current.Tr.GetScale() + pathConverter.Convert(paths...) + + gc.paint(gc.fillRasterizer, gc.Current.FillColor) +} + +/* second method */ +func (gc *ImageGraphicContext) FillStroke(paths ...*PathStorage) { + gc.fillRasterizer.UseNonZeroWinding = gc.Current.FillRule.UseNonZeroWinding() + gc.strokeRasterizer.UseNonZeroWinding = true + + filler := NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.fillRasterizer)) + + stroker := NewLineStroker(gc.Current.Cap, gc.Current.Join, NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.strokeRasterizer))) + stroker.HalfLineWidth = gc.Current.LineWidth / 2 + + demux := NewDemuxConverter(filler, stroker) + paths = append(paths, gc.Current.Path) + pathConverter := NewPathConverter(demux) + pathConverter.ApproximationScale = gc.Current.Tr.GetScale() + pathConverter.Convert(paths...) 
+ + gc.paint(gc.fillRasterizer, gc.Current.FillColor) + gc.paint(gc.strokeRasterizer, gc.Current.StrokeColor) +} + +func (f FillRule) UseNonZeroWinding() bool { + switch f { + case FillRuleEvenOdd: + return false + case FillRuleWinding: + return true + } + return false +} + +func (c Cap) Convert() raster.Capper { + switch c { + case RoundCap: + return raster.RoundCapper + case ButtCap: + return raster.ButtCapper + case SquareCap: + return raster.SquareCapper + } + return raster.RoundCapper +} + +func (j Join) Convert() raster.Joiner { + switch j { + case RoundJoin: + return raster.RoundJoiner + case BevelJoin: + return raster.BevelJoiner + } + return raster.RoundJoiner +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/math.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/math.go new file mode 100644 index 000000000..c4bb761df --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/math.go @@ -0,0 +1,52 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "math" +) + +func distance(x1, y1, x2, y2 float64) float64 { + dx := x2 - x1 + dy := y2 - y1 + return float64(math.Sqrt(dx*dx + dy*dy)) +} + +func vectorDistance(dx, dy float64) float64 { + return float64(math.Sqrt(dx*dx + dy*dy)) +} + +func squareDistance(x1, y1, x2, y2 float64) float64 { + dx := x2 - x1 + dy := y2 - y1 + return dx*dx + dy*dy +} + +func min(x, y float64) float64 { + if x < y { + return x + } + return y +} + +func max(x, y float64) float64 { + if x > y { + return x + } + return y +} + +func minMax(x, y float64) (min, max float64) { + if x > y { + return y, x + } + return x, y +} + +func minUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/paint.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/paint.go new file mode 100644 index 000000000..885d993ae --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/paint.go @@ -0,0 +1,92 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +/* +import ( + "image/draw" + "image" + "freetype-go.googlecode.com/hg/freetype/raster" +)*/ + +const M = 1<<16 - 1 + +/* +type NRGBAPainter struct { + // The image to compose onto. + Image *image.NRGBA + // The Porter-Duff composition operator. + Op draw.Op + // The 16-bit color to paint the spans. + cr, cg, cb, ca uint32 +} + +// Paint satisfies the Painter interface by painting ss onto an image.RGBA. 
+func (r *NRGBAPainter) Paint(ss []raster.Span, done bool) { + b := r.Image.Bounds() + for _, s := range ss { + if s.Y < b.Min.Y { + continue + } + if s.Y >= b.Max.Y { + return + } + if s.X0 < b.Min.X { + s.X0 = b.Min.X + } + if s.X1 > b.Max.X { + s.X1 = b.Max.X + } + if s.X0 >= s.X1 { + continue + } + base := s.Y * r.Image.Stride + p := r.Image.Pix[base+s.X0 : base+s.X1] + // This code is duplicated from drawGlyphOver in $GOROOT/src/pkg/image/draw/draw.go. + // TODO(nigeltao): Factor out common code into a utility function, once the compiler + // can inline such function calls. + ma := s.A >> 16 + if r.Op == draw.Over { + for i, nrgba := range p { + dr, dg, db, da := nrgba. + a := M - (r.ca*ma)/M + da = (da*a + r.ca*ma) / M + if da != 0 { + dr = minUint32(M, (dr*a+r.cr*ma)/da) + dg = minUint32(M, (dg*a+r.cg*ma)/da) + db = minUint32(M, (db*a+r.cb*ma)/da) + } else { + dr, dg, db = 0, 0, 0 + } + p[i] = image.NRGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} + } + } else { + for i, nrgba := range p { + dr, dg, db, da := nrgba.RGBA() + a := M - ma + da = (da*a + r.ca*ma) / M + if da != 0 { + dr = minUint32(M, (dr*a+r.cr*ma)/da) + dg = minUint32(M, (dg*a+r.cg*ma)/da) + db = minUint32(M, (db*a+r.cb*ma)/da) + } else { + dr, dg, db = 0, 0, 0 + } + p[i] = image.NRGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} + } + } + } + +} + +// SetColor sets the color to paint the spans. +func (r *NRGBAPainter) SetColor(c image.Color) { + r.cr, r.cg, r.cb, r.ca = c.RGBA() +} + +// NewRGBAPainter creates a new RGBAPainter for the given image. 
+func NewNRGBAPainter(m *image.NRGBA) *NRGBAPainter { + return &NRGBAPainter{Image: m} +} +*/ diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path.go new file mode 100644 index 000000000..b82910e24 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path.go @@ -0,0 +1,27 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +type Path interface { + // Return the current point of the path + LastPoint() (x, y float64) + // Create a new subpath that start at the specified point + MoveTo(x, y float64) + // Create a new subpath that start at the specified point + // relative to the current point + RMoveTo(dx, dy float64) + // Add a line to the current subpath + LineTo(x, y float64) + // Add a line to the current subpath + // relative to the current point + RLineTo(dx, dy float64) + + QuadCurveTo(cx, cy, x, y float64) + RQuadCurveTo(dcx, dcy, dx, dy float64) + CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) + RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) + ArcTo(cx, cy, rx, ry, startAngle, angle float64) + RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) + Close() +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_adder.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_adder.go new file mode 100644 index 000000000..c5efd2beb --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_adder.go @@ -0,0 +1,70 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 13/12/2010 by Laurent Le Goff + +package draw2d + +import ( + "code.google.com/p/freetype-go/freetype/raster" +) + +type VertexAdder struct { + command VertexCommand + adder raster.Adder +} + +func NewVertexAdder(adder raster.Adder) *VertexAdder { + return &VertexAdder{VertexNoCommand, adder} +} + +func (vertexAdder *VertexAdder) NextCommand(cmd VertexCommand) { + vertexAdder.command = cmd +} + +func (vertexAdder *VertexAdder) Vertex(x, y float64) { + switch vertexAdder.command { + case VertexStartCommand: + vertexAdder.adder.Start(raster.Point{raster.Fix32(x * 256), raster.Fix32(y * 256)}) + default: + vertexAdder.adder.Add1(raster.Point{raster.Fix32(x * 256), raster.Fix32(y * 256)}) + } + vertexAdder.command = VertexNoCommand +} + +type PathAdder struct { + adder raster.Adder + firstPoint raster.Point + ApproximationScale float64 +} + +func NewPathAdder(adder raster.Adder) *PathAdder { + return &PathAdder{adder, raster.Point{0, 0}, 1} +} + +func (pathAdder *PathAdder) Convert(paths ...*PathStorage) { + for _, path := range paths { + j := 0 + for _, cmd := range path.commands { + switch cmd { + case MoveTo: + pathAdder.firstPoint = raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)} + pathAdder.adder.Start(pathAdder.firstPoint) + j += 2 + case LineTo: + pathAdder.adder.Add1(raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)}) + j += 2 + case QuadCurveTo: + pathAdder.adder.Add2(raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)}, raster.Point{raster.Fix32(path.vertices[j+2] * 256), raster.Fix32(path.vertices[j+3] * 256)}) + j += 4 + case CubicCurveTo: + pathAdder.adder.Add3(raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)}, raster.Point{raster.Fix32(path.vertices[j+2] * 256), raster.Fix32(path.vertices[j+3] * 256)}, raster.Point{raster.Fix32(path.vertices[j+4] * 256), 
raster.Fix32(path.vertices[j+5] * 256)}) + j += 6 + case ArcTo: + lastPoint := arcAdder(pathAdder.adder, path.vertices[j], path.vertices[j+1], path.vertices[j+2], path.vertices[j+3], path.vertices[j+4], path.vertices[j+5], pathAdder.ApproximationScale) + pathAdder.adder.Add1(lastPoint) + j += 6 + case Close: + pathAdder.adder.Add1(pathAdder.firstPoint) + } + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_converter.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_converter.go new file mode 100644 index 000000000..0ef96b84d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_converter.go @@ -0,0 +1,173 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 06/12/2010 by Laurent Le Goff + +package draw2d + +import ( + "math" +) + +type PathConverter struct { + converter VertexConverter + ApproximationScale, AngleTolerance, CuspLimit float64 + startX, startY, x, y float64 +} + +func NewPathConverter(converter VertexConverter) *PathConverter { + return &PathConverter{converter, 1, 0, 0, 0, 0, 0, 0} +} + +func (c *PathConverter) Convert(paths ...*PathStorage) { + for _, path := range paths { + j := 0 + for _, cmd := range path.commands { + j = j + c.ConvertCommand(cmd, path.vertices[j:]...) 
+ } + c.converter.NextCommand(VertexStopCommand) + } +} + +func (c *PathConverter) ConvertCommand(cmd PathCmd, vertices ...float64) int { + switch cmd { + case MoveTo: + c.x, c.y = vertices[0], vertices[1] + c.startX, c.startY = c.x, c.y + c.converter.NextCommand(VertexStopCommand) + c.converter.NextCommand(VertexStartCommand) + c.converter.Vertex(c.x, c.y) + return 2 + case LineTo: + c.x, c.y = vertices[0], vertices[1] + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + c.converter.NextCommand(VertexJoinCommand) + return 2 + case QuadCurveTo: + quadraticBezier(c.converter, c.x, c.y, vertices[0], vertices[1], vertices[2], vertices[3], c.ApproximationScale, c.AngleTolerance) + c.x, c.y = vertices[2], vertices[3] + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + return 4 + case CubicCurveTo: + cubicBezier(c.converter, c.x, c.y, vertices[0], vertices[1], vertices[2], vertices[3], vertices[4], vertices[5], c.ApproximationScale, c.AngleTolerance, c.CuspLimit) + c.x, c.y = vertices[4], vertices[5] + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + return 6 + case ArcTo: + c.x, c.y = arc(c.converter, vertices[0], vertices[1], vertices[2], vertices[3], vertices[4], vertices[5], c.ApproximationScale) + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + return 6 + case Close: + c.converter.NextCommand(VertexCloseCommand) + c.converter.Vertex(c.startX, c.startY) + return 0 + } + return 0 +} + +func (c *PathConverter) MoveTo(x, y float64) *PathConverter { + c.x, c.y = x, y + c.startX, c.startY = c.x, c.y + c.converter.NextCommand(VertexStopCommand) + c.converter.NextCommand(VertexStartCommand) + c.converter.Vertex(c.x, c.y) + return c +} + +func (c *PathConverter) RMoveTo(dx, dy float64) 
*PathConverter { + c.MoveTo(c.x+dx, c.y+dy) + return c +} + +func (c *PathConverter) LineTo(x, y float64) *PathConverter { + c.x, c.y = x, y + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + c.converter.NextCommand(VertexJoinCommand) + return c +} + +func (c *PathConverter) RLineTo(dx, dy float64) *PathConverter { + c.LineTo(c.x+dx, c.y+dy) + return c +} + +func (c *PathConverter) QuadCurveTo(cx, cy, x, y float64) *PathConverter { + quadraticBezier(c.converter, c.x, c.y, cx, cy, x, y, c.ApproximationScale, c.AngleTolerance) + c.x, c.y = x, y + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + return c +} + +func (c *PathConverter) RQuadCurveTo(dcx, dcy, dx, dy float64) *PathConverter { + c.QuadCurveTo(c.x+dcx, c.y+dcy, c.x+dx, c.y+dy) + return c +} + +func (c *PathConverter) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) *PathConverter { + cubicBezier(c.converter, c.x, c.y, cx1, cy1, cx2, cy2, x, y, c.ApproximationScale, c.AngleTolerance, c.CuspLimit) + c.x, c.y = x, y + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + return c +} + +func (c *PathConverter) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) *PathConverter { + c.CubicCurveTo(c.x+dcx1, c.y+dcy1, c.x+dcx2, c.y+dcy2, c.x+dx, c.y+dy) + return c +} + +func (c *PathConverter) ArcTo(cx, cy, rx, ry, startAngle, angle float64) *PathConverter { + endAngle := startAngle + angle + clockWise := true + if angle < 0 { + clockWise = false + } + // normalize + if clockWise { + for endAngle < startAngle { + endAngle += math.Pi * 2.0 + } + } else { + for startAngle < endAngle { + startAngle += math.Pi * 2.0 + } + } + startX := cx + math.Cos(startAngle)*rx + startY := cy + math.Sin(startAngle)*ry + c.MoveTo(startX, startY) + c.x, c.y = arc(c.converter, cx, cy, rx, ry, startAngle, angle, 
c.ApproximationScale) + if c.startX == c.x && c.startY == c.y { + c.converter.NextCommand(VertexCloseCommand) + } + c.converter.Vertex(c.x, c.y) + return c +} + +func (c *PathConverter) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) *PathConverter { + c.ArcTo(c.x+dcx, c.y+dcy, rx, ry, startAngle, angle) + return c +} + +func (c *PathConverter) Close() *PathConverter { + c.converter.NextCommand(VertexCloseCommand) + c.converter.Vertex(c.startX, c.startY) + return c +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_storage.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_storage.go new file mode 100644 index 000000000..c2a887037 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/path_storage.go @@ -0,0 +1,190 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "fmt" + "math" +) + +type PathCmd int + +const ( + MoveTo PathCmd = iota + LineTo + QuadCurveTo + CubicCurveTo + ArcTo + Close +) + +type PathStorage struct { + commands []PathCmd + vertices []float64 + x, y float64 +} + +func NewPathStorage() (p *PathStorage) { + p = new(PathStorage) + p.commands = make([]PathCmd, 0, 256) + p.vertices = make([]float64, 0, 256) + return +} + +func (p *PathStorage) Clear() { + p.commands = p.commands[0:0] + p.vertices = p.vertices[0:0] + return +} + +func (p *PathStorage) appendToPath(cmd PathCmd, vertices ...float64) { + if cap(p.vertices) <= len(p.vertices)+6 { + a := make([]PathCmd, len(p.commands), cap(p.commands)+256) + b := make([]float64, len(p.vertices), cap(p.vertices)+256) + copy(a, p.commands) + p.commands = a + copy(b, p.vertices) + p.vertices = b + } + p.commands = p.commands[0 : len(p.commands)+1] + p.commands[len(p.commands)-1] = cmd + copy(p.vertices[len(p.vertices):len(p.vertices)+len(vertices)], vertices) + p.vertices = p.vertices[0 : len(p.vertices)+len(vertices)] +} + +func (src *PathStorage) Copy() (dest 
*PathStorage) { + dest = new(PathStorage) + dest.commands = make([]PathCmd, len(src.commands)) + copy(dest.commands, src.commands) + dest.vertices = make([]float64, len(src.vertices)) + copy(dest.vertices, src.vertices) + return dest +} + +func (p *PathStorage) LastPoint() (x, y float64) { + return p.x, p.y +} + +func (p *PathStorage) IsEmpty() bool { + return len(p.commands) == 0 +} + +func (p *PathStorage) Close() *PathStorage { + p.appendToPath(Close) + return p +} + +func (p *PathStorage) MoveTo(x, y float64) *PathStorage { + p.appendToPath(MoveTo, x, y) + p.x = x + p.y = y + return p +} + +func (p *PathStorage) RMoveTo(dx, dy float64) *PathStorage { + x, y := p.LastPoint() + p.MoveTo(x+dx, y+dy) + return p +} + +func (p *PathStorage) LineTo(x, y float64) *PathStorage { + p.appendToPath(LineTo, x, y) + p.x = x + p.y = y + return p +} + +func (p *PathStorage) RLineTo(dx, dy float64) *PathStorage { + x, y := p.LastPoint() + p.LineTo(x+dx, y+dy) + return p +} + +func (p *PathStorage) QuadCurveTo(cx, cy, x, y float64) *PathStorage { + p.appendToPath(QuadCurveTo, cx, cy, x, y) + p.x = x + p.y = y + return p +} + +func (p *PathStorage) RQuadCurveTo(dcx, dcy, dx, dy float64) *PathStorage { + x, y := p.LastPoint() + p.QuadCurveTo(x+dcx, y+dcy, x+dx, y+dy) + return p +} + +func (p *PathStorage) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) *PathStorage { + p.appendToPath(CubicCurveTo, cx1, cy1, cx2, cy2, x, y) + p.x = x + p.y = y + return p +} + +func (p *PathStorage) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) *PathStorage { + x, y := p.LastPoint() + p.CubicCurveTo(x+dcx1, y+dcy1, x+dcx2, y+dcy2, x+dx, y+dy) + return p +} + +func (p *PathStorage) ArcTo(cx, cy, rx, ry, startAngle, angle float64) *PathStorage { + endAngle := startAngle + angle + clockWise := true + if angle < 0 { + clockWise = false + } + // normalize + if clockWise { + for endAngle < startAngle { + endAngle += math.Pi * 2.0 + } + } else { + for startAngle < endAngle { + startAngle += math.Pi 
* 2.0 + } + } + startX := cx + math.Cos(startAngle)*rx + startY := cy + math.Sin(startAngle)*ry + if len(p.commands) > 0 { + p.LineTo(startX, startY) + } else { + p.MoveTo(startX, startY) + } + p.appendToPath(ArcTo, cx, cy, rx, ry, startAngle, angle) + p.x = cx + math.Cos(endAngle)*rx + p.y = cy + math.Sin(endAngle)*ry + return p +} + +func (p *PathStorage) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) *PathStorage { + x, y := p.LastPoint() + p.ArcTo(x+dcx, y+dcy, rx, ry, startAngle, angle) + return p +} + +func (p *PathStorage) String() string { + s := "" + j := 0 + for _, cmd := range p.commands { + switch cmd { + case MoveTo: + s += fmt.Sprintf("MoveTo: %f, %f\n", p.vertices[j], p.vertices[j+1]) + j = j + 2 + case LineTo: + s += fmt.Sprintf("LineTo: %f, %f\n", p.vertices[j], p.vertices[j+1]) + j = j + 2 + case QuadCurveTo: + s += fmt.Sprintf("QuadCurveTo: %f, %f, %f, %f\n", p.vertices[j], p.vertices[j+1], p.vertices[j+2], p.vertices[j+3]) + j = j + 4 + case CubicCurveTo: + s += fmt.Sprintf("CubicCurveTo: %f, %f, %f, %f, %f, %f\n", p.vertices[j], p.vertices[j+1], p.vertices[j+2], p.vertices[j+3], p.vertices[j+4], p.vertices[j+5]) + j = j + 6 + case ArcTo: + s += fmt.Sprintf("ArcTo: %f, %f, %f, %f, %f, %f\n", p.vertices[j], p.vertices[j+1], p.vertices[j+2], p.vertices[j+3], p.vertices[j+4], p.vertices[j+5]) + j = j + 6 + case Close: + s += "Close\n" + } + } + return s +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/coverage_table.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/coverage_table.go new file mode 100644 index 000000000..429836f39 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/coverage_table.go @@ -0,0 +1,203 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. 
+// created: 27/05/2011 by Laurent Le Goff +package raster + +var SUBPIXEL_OFFSETS_SAMPLE_8 = [8]float64{ + 5.0 / 8, + 0.0 / 8, + 3.0 / 8, + 6.0 / 8, + 1.0 / 8, + 4.0 / 8, + 7.0 / 8, + 2.0 / 8, +} + +var SUBPIXEL_OFFSETS_SAMPLE_8_FIXED = [8]Fix{ + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[0] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[1] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[2] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[3] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[4] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[5] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[6] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_8[7] * FIXED_FLOAT_COEF), +} + +var SUBPIXEL_OFFSETS_SAMPLE_16 = [16]float64{ + 1.0 / 16, + 8.0 / 16, + 4.0 / 16, + 15.0 / 16, + 11.0 / 16, + 2.0 / 16, + 6.0 / 16, + 14.0 / 16, + 10.0 / 16, + 3.0 / 16, + 7.0 / 16, + 12.0 / 16, + 0.0 / 16, + 9.0 / 16, + 5.0 / 16, + 13.0 / 16, +} + +var SUBPIXEL_OFFSETS_SAMPLE_16_FIXED = [16]Fix{ + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[0] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[1] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[2] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[3] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[4] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[5] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[6] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[7] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[8] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[9] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[10] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[11] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[12] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[13] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[14] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_16[15] * FIXED_FLOAT_COEF), +} + +var SUBPIXEL_OFFSETS_SAMPLE_32 = [32]float64{ + 28.0 / 32, + 13.0 / 32, + 6.0 
/ 32, + 23.0 / 32, + 0.0 / 32, + 17.0 / 32, + 10.0 / 32, + 27.0 / 32, + 4.0 / 32, + 21.0 / 32, + 14.0 / 32, + 31.0 / 32, + 8.0 / 32, + 25.0 / 32, + 18.0 / 32, + 3.0 / 32, + 12.0 / 32, + 29.0 / 32, + 22.0 / 32, + 7.0 / 32, + 16.0 / 32, + 1.0 / 32, + 26.0 / 32, + 11.0 / 32, + 20.0 / 32, + 5.0 / 32, + 30.0 / 32, + 15.0 / 32, + 24.0 / 32, + 9.0 / 32, + 2.0 / 32, + 19.0 / 32, +} +var SUBPIXEL_OFFSETS_SAMPLE_32_FIXED = [32]Fix{ + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[0] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[1] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[2] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[3] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[4] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[5] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[6] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[7] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[8] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[9] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[10] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[11] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[12] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[13] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[14] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[15] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[16] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[17] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[18] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[19] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[20] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[21] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[22] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[23] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[24] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[25] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[26] * FIXED_FLOAT_COEF), + 
Fix(SUBPIXEL_OFFSETS_SAMPLE_32[27] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[28] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[29] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[30] * FIXED_FLOAT_COEF), + Fix(SUBPIXEL_OFFSETS_SAMPLE_32[31] * FIXED_FLOAT_COEF), +} + +var coverageTable = [256]uint8{ + pixelCoverage(0x00), pixelCoverage(0x01), pixelCoverage(0x02), pixelCoverage(0x03), + pixelCoverage(0x04), pixelCoverage(0x05), pixelCoverage(0x06), pixelCoverage(0x07), + pixelCoverage(0x08), pixelCoverage(0x09), pixelCoverage(0x0a), pixelCoverage(0x0b), + pixelCoverage(0x0c), pixelCoverage(0x0d), pixelCoverage(0x0e), pixelCoverage(0x0f), + pixelCoverage(0x10), pixelCoverage(0x11), pixelCoverage(0x12), pixelCoverage(0x13), + pixelCoverage(0x14), pixelCoverage(0x15), pixelCoverage(0x16), pixelCoverage(0x17), + pixelCoverage(0x18), pixelCoverage(0x19), pixelCoverage(0x1a), pixelCoverage(0x1b), + pixelCoverage(0x1c), pixelCoverage(0x1d), pixelCoverage(0x1e), pixelCoverage(0x1f), + pixelCoverage(0x20), pixelCoverage(0x21), pixelCoverage(0x22), pixelCoverage(0x23), + pixelCoverage(0x24), pixelCoverage(0x25), pixelCoverage(0x26), pixelCoverage(0x27), + pixelCoverage(0x28), pixelCoverage(0x29), pixelCoverage(0x2a), pixelCoverage(0x2b), + pixelCoverage(0x2c), pixelCoverage(0x2d), pixelCoverage(0x2e), pixelCoverage(0x2f), + pixelCoverage(0x30), pixelCoverage(0x31), pixelCoverage(0x32), pixelCoverage(0x33), + pixelCoverage(0x34), pixelCoverage(0x35), pixelCoverage(0x36), pixelCoverage(0x37), + pixelCoverage(0x38), pixelCoverage(0x39), pixelCoverage(0x3a), pixelCoverage(0x3b), + pixelCoverage(0x3c), pixelCoverage(0x3d), pixelCoverage(0x3e), pixelCoverage(0x3f), + pixelCoverage(0x40), pixelCoverage(0x41), pixelCoverage(0x42), pixelCoverage(0x43), + pixelCoverage(0x44), pixelCoverage(0x45), pixelCoverage(0x46), pixelCoverage(0x47), + pixelCoverage(0x48), pixelCoverage(0x49), pixelCoverage(0x4a), pixelCoverage(0x4b), + pixelCoverage(0x4c), 
pixelCoverage(0x4d), pixelCoverage(0x4e), pixelCoverage(0x4f), + pixelCoverage(0x50), pixelCoverage(0x51), pixelCoverage(0x52), pixelCoverage(0x53), + pixelCoverage(0x54), pixelCoverage(0x55), pixelCoverage(0x56), pixelCoverage(0x57), + pixelCoverage(0x58), pixelCoverage(0x59), pixelCoverage(0x5a), pixelCoverage(0x5b), + pixelCoverage(0x5c), pixelCoverage(0x5d), pixelCoverage(0x5e), pixelCoverage(0x5f), + pixelCoverage(0x60), pixelCoverage(0x61), pixelCoverage(0x62), pixelCoverage(0x63), + pixelCoverage(0x64), pixelCoverage(0x65), pixelCoverage(0x66), pixelCoverage(0x67), + pixelCoverage(0x68), pixelCoverage(0x69), pixelCoverage(0x6a), pixelCoverage(0x6b), + pixelCoverage(0x6c), pixelCoverage(0x6d), pixelCoverage(0x6e), pixelCoverage(0x6f), + pixelCoverage(0x70), pixelCoverage(0x71), pixelCoverage(0x72), pixelCoverage(0x73), + pixelCoverage(0x74), pixelCoverage(0x75), pixelCoverage(0x76), pixelCoverage(0x77), + pixelCoverage(0x78), pixelCoverage(0x79), pixelCoverage(0x7a), pixelCoverage(0x7b), + pixelCoverage(0x7c), pixelCoverage(0x7d), pixelCoverage(0x7e), pixelCoverage(0x7f), + pixelCoverage(0x80), pixelCoverage(0x81), pixelCoverage(0x82), pixelCoverage(0x83), + pixelCoverage(0x84), pixelCoverage(0x85), pixelCoverage(0x86), pixelCoverage(0x87), + pixelCoverage(0x88), pixelCoverage(0x89), pixelCoverage(0x8a), pixelCoverage(0x8b), + pixelCoverage(0x8c), pixelCoverage(0x8d), pixelCoverage(0x8e), pixelCoverage(0x8f), + pixelCoverage(0x90), pixelCoverage(0x91), pixelCoverage(0x92), pixelCoverage(0x93), + pixelCoverage(0x94), pixelCoverage(0x95), pixelCoverage(0x96), pixelCoverage(0x97), + pixelCoverage(0x98), pixelCoverage(0x99), pixelCoverage(0x9a), pixelCoverage(0x9b), + pixelCoverage(0x9c), pixelCoverage(0x9d), pixelCoverage(0x9e), pixelCoverage(0x9f), + pixelCoverage(0xa0), pixelCoverage(0xa1), pixelCoverage(0xa2), pixelCoverage(0xa3), + pixelCoverage(0xa4), pixelCoverage(0xa5), pixelCoverage(0xa6), pixelCoverage(0xa7), + pixelCoverage(0xa8), pixelCoverage(0xa9), 
pixelCoverage(0xaa), pixelCoverage(0xab), + pixelCoverage(0xac), pixelCoverage(0xad), pixelCoverage(0xae), pixelCoverage(0xaf), + pixelCoverage(0xb0), pixelCoverage(0xb1), pixelCoverage(0xb2), pixelCoverage(0xb3), + pixelCoverage(0xb4), pixelCoverage(0xb5), pixelCoverage(0xb6), pixelCoverage(0xb7), + pixelCoverage(0xb8), pixelCoverage(0xb9), pixelCoverage(0xba), pixelCoverage(0xbb), + pixelCoverage(0xbc), pixelCoverage(0xbd), pixelCoverage(0xbe), pixelCoverage(0xbf), + pixelCoverage(0xc0), pixelCoverage(0xc1), pixelCoverage(0xc2), pixelCoverage(0xc3), + pixelCoverage(0xc4), pixelCoverage(0xc5), pixelCoverage(0xc6), pixelCoverage(0xc7), + pixelCoverage(0xc8), pixelCoverage(0xc9), pixelCoverage(0xca), pixelCoverage(0xcb), + pixelCoverage(0xcc), pixelCoverage(0xcd), pixelCoverage(0xce), pixelCoverage(0xcf), + pixelCoverage(0xd0), pixelCoverage(0xd1), pixelCoverage(0xd2), pixelCoverage(0xd3), + pixelCoverage(0xd4), pixelCoverage(0xd5), pixelCoverage(0xd6), pixelCoverage(0xd7), + pixelCoverage(0xd8), pixelCoverage(0xd9), pixelCoverage(0xda), pixelCoverage(0xdb), + pixelCoverage(0xdc), pixelCoverage(0xdd), pixelCoverage(0xde), pixelCoverage(0xdf), + pixelCoverage(0xe0), pixelCoverage(0xe1), pixelCoverage(0xe2), pixelCoverage(0xe3), + pixelCoverage(0xe4), pixelCoverage(0xe5), pixelCoverage(0xe6), pixelCoverage(0xe7), + pixelCoverage(0xe8), pixelCoverage(0xe9), pixelCoverage(0xea), pixelCoverage(0xeb), + pixelCoverage(0xec), pixelCoverage(0xed), pixelCoverage(0xee), pixelCoverage(0xef), + pixelCoverage(0xf0), pixelCoverage(0xf1), pixelCoverage(0xf2), pixelCoverage(0xf3), + pixelCoverage(0xf4), pixelCoverage(0xf5), pixelCoverage(0xf6), pixelCoverage(0xf7), + pixelCoverage(0xf8), pixelCoverage(0xf9), pixelCoverage(0xfa), pixelCoverage(0xfb), + pixelCoverage(0xfc), pixelCoverage(0xfd), pixelCoverage(0xfe), pixelCoverage(0xff), +} + +func pixelCoverage(a uint8) uint8 { + return a&1 + a>>1&1 + a>>2&1 + a>>3&1 + a>>4&1 + a>>5&1 + a>>6&1 + a>>7&1 +} diff --git 
a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerAA.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerAA.go new file mode 100644 index 000000000..dbff87f1e --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerAA.go @@ -0,0 +1,320 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. +// created: 27/05/2011 by Laurent Le Goff +package raster + +import ( + "image" + "image/color" + "unsafe" +) + +const ( + SUBPIXEL_SHIFT = 3 + SUBPIXEL_COUNT = 1 << SUBPIXEL_SHIFT +) + +var SUBPIXEL_OFFSETS = SUBPIXEL_OFFSETS_SAMPLE_8_FIXED + +type SUBPIXEL_DATA uint8 +type NON_ZERO_MASK_DATA_UNIT uint8 + +type Rasterizer8BitsSample struct { + MaskBuffer []SUBPIXEL_DATA + WindingBuffer []NON_ZERO_MASK_DATA_UNIT + + Width int + BufferWidth int + Height int + ClipBound [4]float64 + RemappingMatrix [6]float64 +} + +/* width and height define the maximum output size for the filler. + * The filler will output to larger bitmaps as well, but the output will + * be cropped. + */ +func NewRasterizer8BitsSample(width, height int) *Rasterizer8BitsSample { + var r Rasterizer8BitsSample + // Scale the coordinates by SUBPIXEL_COUNT in vertical direction + // The sampling point for the sub-pixel is at the top right corner. This + // adjustment moves it to the pixel center. + r.RemappingMatrix = [6]float64{1, 0, 0, SUBPIXEL_COUNT, 0.5 / SUBPIXEL_COUNT, -0.5 * SUBPIXEL_COUNT} + r.Width = width + r.Height = height + // The buffer used for filling needs to be one pixel wider than the bitmap. + // This is because the end flag that turns the fill of is the first pixel + // after the actually drawn edge. 
+ r.BufferWidth = width + 1 + + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*height) + r.WindingBuffer = make([]NON_ZERO_MASK_DATA_UNIT, r.BufferWidth*height*SUBPIXEL_COUNT) + r.ClipBound = clip(0, 0, width, height, SUBPIXEL_COUNT) + return &r +} + +func clip(x, y, width, height, scale int) [4]float64 { + var clipBound [4]float64 + + offset := 0.99 / float64(scale) + + clipBound[0] = float64(x) + offset + clipBound[2] = float64(x+width) - offset + + clipBound[1] = float64(y * scale) + clipBound[3] = float64((y + height) * scale) + return clipBound +} + +func intersect(r1, r2 [4]float64) [4]float64 { + if r1[0] < r2[0] { + r1[0] = r2[0] + } + if r1[2] > r2[2] { + r1[2] = r2[2] + } + if r1[0] > r1[2] { + r1[0] = r1[2] + } + + if r1[1] < r2[1] { + r1[1] = r2[1] + } + if r1[3] > r2[3] { + r1[3] = r2[3] + } + if r1[1] > r1[3] { + r1[1] = r1[3] + } + return r1 +} + +func (r *Rasterizer8BitsSample) RenderEvenOdd(img *image.RGBA, color *color.RGBA, polygon *Polygon, tr [6]float64) { + // memset 0 the mask buffer + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*r.Height) + + // inline matrix multiplication + transform := [6]float64{ + tr[0]*r.RemappingMatrix[0] + tr[1]*r.RemappingMatrix[2], + tr[1]*r.RemappingMatrix[3] + tr[0]*r.RemappingMatrix[1], + tr[2]*r.RemappingMatrix[0] + tr[3]*r.RemappingMatrix[2], + tr[3]*r.RemappingMatrix[3] + tr[2]*r.RemappingMatrix[1], + tr[4]*r.RemappingMatrix[0] + tr[5]*r.RemappingMatrix[2] + r.RemappingMatrix[4], + tr[5]*r.RemappingMatrix[3] + tr[4]*r.RemappingMatrix[1] + r.RemappingMatrix[5], + } + + clipRect := clip(img.Bounds().Min.X, img.Bounds().Min.Y, img.Bounds().Dx(), img.Bounds().Dy(), SUBPIXEL_COUNT) + clipRect = intersect(clipRect, r.ClipBound) + p := 0 + l := len(*polygon) / 2 + var edges [32]PolygonEdge + for p < l { + edgeCount := polygon.getEdges(p, 16, edges[:], transform, clipRect) + for k := 0; k < edgeCount; k++ { + r.addEvenOddEdge(&edges[k]) + } + p += 16 + } + + r.fillEvenOdd(img, color, clipRect) +} + +//! 
Adds an edge to be used with even-odd fill. +func (r *Rasterizer8BitsSample) addEvenOddEdge(edge *PolygonEdge) { + x := Fix(edge.X * FIXED_FLOAT_COEF) + slope := Fix(edge.Slope * FIXED_FLOAT_COEF) + slopeFix := Fix(0) + if edge.LastLine-edge.FirstLine >= SLOPE_FIX_STEP { + slopeFix = Fix(edge.Slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - slope<> FIXED_SHIFT) + mask = SUBPIXEL_DATA(1 << ySub) + yLine = y >> SUBPIXEL_SHIFT + r.MaskBuffer[yLine*r.BufferWidth+xp] ^= mask + x += slope + if y&SLOPE_FIX_MASK == 0 { + x += slopeFix + } + } +} + +//! Adds an edge to be used with non-zero winding fill. +func (r *Rasterizer8BitsSample) addNonZeroEdge(edge *PolygonEdge) { + x := Fix(edge.X * FIXED_FLOAT_COEF) + slope := Fix(edge.Slope * FIXED_FLOAT_COEF) + slopeFix := Fix(0) + if edge.LastLine-edge.FirstLine >= SLOPE_FIX_STEP { + slopeFix = Fix(edge.Slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - slope<> FIXED_SHIFT) + mask = SUBPIXEL_DATA(1 << ySub) + yLine = y >> SUBPIXEL_SHIFT + r.MaskBuffer[yLine*r.BufferWidth+xp] |= mask + r.WindingBuffer[(yLine*r.BufferWidth+xp)*SUBPIXEL_COUNT+int(ySub)] += winding + x += slope + if y&SLOPE_FIX_MASK == 0 { + x += slopeFix + } + } +} + +// Renders the mask to the canvas with even-odd fill. 
+func (r *Rasterizer8BitsSample) fillEvenOdd(img *image.RGBA, color *color.RGBA, clipBound [4]float64) { + var x, y uint32 + + minX := uint32(clipBound[0]) + maxX := uint32(clipBound[2]) + + minY := uint32(clipBound[1]) >> SUBPIXEL_SHIFT + maxY := uint32(clipBound[3]) >> SUBPIXEL_SHIFT + + //pixColor := (uint32(color.R) << 24) | (uint32(color.G) << 16) | (uint32(color.B) << 8) | uint32(color.A) + pixColor := (*uint32)(unsafe.Pointer(color)) + cs1 := *pixColor & 0xff00ff + cs2 := *pixColor >> 8 & 0xff00ff + + stride := uint32(img.Stride) + var mask SUBPIXEL_DATA + + for y = minY; y < maxY; y++ { + tp := img.Pix[y*stride:] + + mask = 0 + for x = minX; x <= maxX; x++ { + p := (*uint32)(unsafe.Pointer(&tp[x])) + mask ^= r.MaskBuffer[y*uint32(r.BufferWidth)+x] + // 8bits + alpha := uint32(coverageTable[mask]) + // 16bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff]) + // 32bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff] + coverageTable[(mask >> 16) & 0xff] + coverageTable[(mask >> 24) & 0xff]) + + // alpha is in range of 0 to SUBPIXEL_COUNT + invAlpha := SUBPIXEL_COUNT - alpha + + ct1 := *p & 0xff00ff * invAlpha + ct2 := *p >> 8 & 0xff00ff * invAlpha + + ct1 = (ct1 + cs1*alpha) >> SUBPIXEL_SHIFT & 0xff00ff + ct2 = (ct2 + cs2*alpha) << (8 - SUBPIXEL_SHIFT) & 0xff00ff00 + + *p = ct1 + ct2 + } + } +} + +/* + * Renders the polygon with non-zero winding fill. + * param aTarget the target bitmap. + * param aPolygon the polygon to render. + * param aColor the color to be used for rendering. + * param aTransformation the transformation matrix. 
+ */ +func (r *Rasterizer8BitsSample) RenderNonZeroWinding(img *image.RGBA, color *color.RGBA, polygon *Polygon, tr [6]float64) { + + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*r.Height) + r.WindingBuffer = make([]NON_ZERO_MASK_DATA_UNIT, r.BufferWidth*r.Height*SUBPIXEL_COUNT) + + // inline matrix multiplication + transform := [6]float64{ + tr[0]*r.RemappingMatrix[0] + tr[1]*r.RemappingMatrix[2], + tr[1]*r.RemappingMatrix[3] + tr[0]*r.RemappingMatrix[1], + tr[2]*r.RemappingMatrix[0] + tr[3]*r.RemappingMatrix[2], + tr[3]*r.RemappingMatrix[3] + tr[2]*r.RemappingMatrix[1], + tr[4]*r.RemappingMatrix[0] + tr[5]*r.RemappingMatrix[2] + r.RemappingMatrix[4], + tr[5]*r.RemappingMatrix[3] + tr[4]*r.RemappingMatrix[1] + r.RemappingMatrix[5], + } + + clipRect := clip(img.Bounds().Min.X, img.Bounds().Min.Y, img.Bounds().Dx(), img.Bounds().Dy(), SUBPIXEL_COUNT) + clipRect = intersect(clipRect, r.ClipBound) + + p := 0 + l := len(*polygon) / 2 + var edges [32]PolygonEdge + for p < l { + edgeCount := polygon.getEdges(p, 16, edges[:], transform, clipRect) + for k := 0; k < edgeCount; k++ { + r.addNonZeroEdge(&edges[k]) + } + p += 16 + } + + r.fillNonZero(img, color, clipRect) +} + +//! Renders the mask to the canvas with non-zero winding fill. 
+func (r *Rasterizer8BitsSample) fillNonZero(img *image.RGBA, color *color.RGBA, clipBound [4]float64) { + var x, y uint32 + + minX := uint32(clipBound[0]) + maxX := uint32(clipBound[2]) + + minY := uint32(clipBound[1]) >> SUBPIXEL_SHIFT + maxY := uint32(clipBound[3]) >> SUBPIXEL_SHIFT + + //pixColor := (uint32(color.R) << 24) | (uint32(color.G) << 16) | (uint32(color.B) << 8) | uint32(color.A) + pixColor := (*uint32)(unsafe.Pointer(color)) + cs1 := *pixColor & 0xff00ff + cs2 := *pixColor >> 8 & 0xff00ff + + stride := uint32(img.Stride) + var mask SUBPIXEL_DATA + var n uint32 + var values [SUBPIXEL_COUNT]NON_ZERO_MASK_DATA_UNIT + for n = 0; n < SUBPIXEL_COUNT; n++ { + values[n] = 0 + } + + for y = minY; y < maxY; y++ { + tp := img.Pix[y*stride:] + + mask = 0 + for x = minX; x <= maxX; x++ { + p := (*uint32)(unsafe.Pointer(&tp[x])) + temp := r.MaskBuffer[y*uint32(r.BufferWidth)+x] + if temp != 0 { + var bit SUBPIXEL_DATA = 1 + for n = 0; n < SUBPIXEL_COUNT; n++ { + if temp&bit != 0 { + t := values[n] + values[n] += r.WindingBuffer[(y*uint32(r.BufferWidth)+x)*SUBPIXEL_COUNT+n] + if (t == 0 || values[n] == 0) && t != values[n] { + mask ^= bit + } + } + bit <<= 1 + } + } + + // 8bits + alpha := uint32(coverageTable[mask]) + // 16bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff]) + // 32bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff] + coverageTable[(mask >> 16) & 0xff] + coverageTable[(mask >> 24) & 0xff]) + + // alpha is in range of 0 to SUBPIXEL_COUNT + invAlpha := uint32(SUBPIXEL_COUNT) - alpha + + ct1 := *p & 0xff00ff * invAlpha + ct2 := *p >> 8 & 0xff00ff * invAlpha + + ct1 = (ct1 + cs1*alpha) >> SUBPIXEL_SHIFT & 0xff00ff + ct2 = (ct2 + cs2*alpha) << (8 - SUBPIXEL_SHIFT) & 0xff00ff00 + + *p = ct1 + ct2 + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV1/fillerAA.go 
b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV1/fillerAA.go new file mode 100644 index 000000000..a85d34c77 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV1/fillerAA.go @@ -0,0 +1,303 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. +// created: 27/05/2011 by Laurent Le Goff +package raster + +import ( + "image" + "image/color" + "unsafe" +) + +const ( + SUBPIXEL_SHIFT = 3 + SUBPIXEL_COUNT = 1 << SUBPIXEL_SHIFT +) + +var SUBPIXEL_OFFSETS = SUBPIXEL_OFFSETS_SAMPLE_8 + +type SUBPIXEL_DATA uint16 +type NON_ZERO_MASK_DATA_UNIT uint8 + +type Rasterizer8BitsSample struct { + MaskBuffer []SUBPIXEL_DATA + WindingBuffer []NON_ZERO_MASK_DATA_UNIT + + Width int + BufferWidth int + Height int + ClipBound [4]float64 + RemappingMatrix [6]float64 +} + +/* width and height define the maximum output size for the filler. + * The filler will output to larger bitmaps as well, but the output will + * be cropped. + */ +func NewRasterizer8BitsSample(width, height int) *Rasterizer8BitsSample { + var r Rasterizer8BitsSample + // Scale the coordinates by SUBPIXEL_COUNT in vertical direction + // The sampling point for the sub-pixel is at the top right corner. This + // adjustment moves it to the pixel center. + r.RemappingMatrix = [6]float64{1, 0, 0, SUBPIXEL_COUNT, 0.5 / SUBPIXEL_COUNT, -0.5 * SUBPIXEL_COUNT} + r.Width = width + r.Height = height + // The buffer used for filling needs to be one pixel wider than the bitmap. + // This is because the end flag that turns the fill of is the first pixel + // after the actually drawn edge. 
+ r.BufferWidth = width + 1 + + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*height) + r.WindingBuffer = make([]NON_ZERO_MASK_DATA_UNIT, r.BufferWidth*height*SUBPIXEL_COUNT) + r.ClipBound = clip(0, 0, width, height, SUBPIXEL_COUNT) + return &r +} + +func clip(x, y, width, height, scale int) [4]float64 { + var clipBound [4]float64 + + offset := 0.99 / float64(scale) + + clipBound[0] = float64(x) + offset + clipBound[2] = float64(x+width) - offset + + clipBound[1] = float64(y * scale) + clipBound[3] = float64((y + height) * scale) + return clipBound +} + +func intersect(r1, r2 [4]float64) [4]float64 { + if r1[0] < r2[0] { + r1[0] = r2[0] + } + if r1[2] > r2[2] { + r1[2] = r2[2] + } + if r1[0] > r1[2] { + r1[0] = r1[2] + } + + if r1[1] < r2[1] { + r1[1] = r2[1] + } + if r1[3] > r2[3] { + r1[3] = r2[3] + } + if r1[1] > r1[3] { + r1[1] = r1[3] + } + return r1 +} + +func (r *Rasterizer8BitsSample) RenderEvenOdd(img *image.RGBA, color *color.RGBA, polygon *Polygon, tr [6]float64) { + // memset 0 the mask buffer + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*r.Height) + + // inline matrix multiplication + transform := [6]float64{ + tr[0]*r.RemappingMatrix[0] + tr[1]*r.RemappingMatrix[2], + tr[1]*r.RemappingMatrix[3] + tr[0]*r.RemappingMatrix[1], + tr[2]*r.RemappingMatrix[0] + tr[3]*r.RemappingMatrix[2], + tr[3]*r.RemappingMatrix[3] + tr[2]*r.RemappingMatrix[1], + tr[4]*r.RemappingMatrix[0] + tr[5]*r.RemappingMatrix[2] + r.RemappingMatrix[4], + tr[5]*r.RemappingMatrix[3] + tr[4]*r.RemappingMatrix[1] + r.RemappingMatrix[5], + } + + clipRect := clip(img.Bounds().Min.X, img.Bounds().Min.Y, img.Bounds().Dx(), img.Bounds().Dy(), SUBPIXEL_COUNT) + clipRect = intersect(clipRect, r.ClipBound) + p := 0 + l := len(*polygon) / 2 + var edges [32]PolygonEdge + for p < l { + edgeCount := polygon.getEdges(p, 16, edges[:], transform, clipRect) + for k := 0; k < edgeCount; k++ { + r.addEvenOddEdge(&edges[k]) + } + p += 16 + } + + r.fillEvenOdd(img, color, clipRect) +} + +//! 
Adds an edge to be used with even-odd fill. +func (r *Rasterizer8BitsSample) addEvenOddEdge(edge *PolygonEdge) { + x := edge.X + slope := edge.Slope + var ySub, mask SUBPIXEL_DATA + var xp, yLine int + for y := edge.FirstLine; y <= edge.LastLine; y++ { + ySub = SUBPIXEL_DATA(y & (SUBPIXEL_COUNT - 1)) + xp = int(x + SUBPIXEL_OFFSETS[ySub]) + mask = SUBPIXEL_DATA(1 << ySub) + yLine = y >> SUBPIXEL_SHIFT + r.MaskBuffer[yLine*r.BufferWidth+xp] ^= mask + x += slope + } +} + +// Renders the mask to the canvas with even-odd fill. +func (r *Rasterizer8BitsSample) fillEvenOdd(img *image.RGBA, color *color.RGBA, clipBound [4]float64) { + var x, y uint32 + + minX := uint32(clipBound[0]) + maxX := uint32(clipBound[2]) + + minY := uint32(clipBound[1]) >> SUBPIXEL_SHIFT + maxY := uint32(clipBound[3]) >> SUBPIXEL_SHIFT + + //pixColor := (uint32(color.R) << 24) | (uint32(color.G) << 16) | (uint32(color.B) << 8) | uint32(color.A) + pixColor := (*uint32)(unsafe.Pointer(color)) + cs1 := *pixColor & 0xff00ff + cs2 := *pixColor >> 8 & 0xff00ff + + stride := uint32(img.Stride) + var mask SUBPIXEL_DATA + + for y = minY; y < maxY; y++ { + tp := img.Pix[y*stride:] + + mask = 0 + for x = minX; x <= maxX; x++ { + p := (*uint32)(unsafe.Pointer(&tp[x])) + mask ^= r.MaskBuffer[y*uint32(r.BufferWidth)+x] + // 8bits + alpha := uint32(coverageTable[mask]) + // 16bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff]) + // 32bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff] + coverageTable[(mask >> 16) & 0xff] + coverageTable[(mask >> 24) & 0xff]) + + // alpha is in range of 0 to SUBPIXEL_COUNT + invAlpha := uint32(SUBPIXEL_COUNT) - alpha + + ct1 := *p & 0xff00ff * invAlpha + ct2 := *p >> 8 & 0xff00ff * invAlpha + + ct1 = (ct1 + cs1*alpha) >> SUBPIXEL_SHIFT & 0xff00ff + ct2 = (ct2 + cs2*alpha) << (8 - SUBPIXEL_SHIFT) & 0xff00ff00 + + *p = ct1 + ct2 + } + } +} + +/* + * Renders the polygon with non-zero winding fill. 
+ * param aTarget the target bitmap. + * param aPolygon the polygon to render. + * param aColor the color to be used for rendering. + * param aTransformation the transformation matrix. + */ +func (r *Rasterizer8BitsSample) RenderNonZeroWinding(img *image.RGBA, color *color.RGBA, polygon *Polygon, tr [6]float64) { + + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*r.Height) + r.WindingBuffer = make([]NON_ZERO_MASK_DATA_UNIT, r.BufferWidth*r.Height*SUBPIXEL_COUNT) + + // inline matrix multiplication + transform := [6]float64{ + tr[0]*r.RemappingMatrix[0] + tr[1]*r.RemappingMatrix[2], + tr[1]*r.RemappingMatrix[3] + tr[0]*r.RemappingMatrix[1], + tr[2]*r.RemappingMatrix[0] + tr[3]*r.RemappingMatrix[2], + tr[3]*r.RemappingMatrix[3] + tr[2]*r.RemappingMatrix[1], + tr[4]*r.RemappingMatrix[0] + tr[5]*r.RemappingMatrix[2] + r.RemappingMatrix[4], + tr[5]*r.RemappingMatrix[3] + tr[4]*r.RemappingMatrix[1] + r.RemappingMatrix[5], + } + + clipRect := clip(img.Bounds().Min.X, img.Bounds().Min.Y, img.Bounds().Dx(), img.Bounds().Dy(), SUBPIXEL_COUNT) + clipRect = intersect(clipRect, r.ClipBound) + + p := 0 + l := len(*polygon) / 2 + var edges [32]PolygonEdge + for p < l { + edgeCount := polygon.getEdges(p, 16, edges[:], transform, clipRect) + for k := 0; k < edgeCount; k++ { + r.addNonZeroEdge(&edges[k]) + } + p += 16 + } + + r.fillNonZero(img, color, clipRect) +} + +//! Adds an edge to be used with non-zero winding fill. +func (r *Rasterizer8BitsSample) addNonZeroEdge(edge *PolygonEdge) { + x := edge.X + slope := edge.Slope + var ySub, mask SUBPIXEL_DATA + var xp, yLine int + winding := NON_ZERO_MASK_DATA_UNIT(edge.Winding) + for y := edge.FirstLine; y <= edge.LastLine; y++ { + ySub = SUBPIXEL_DATA(y & (SUBPIXEL_COUNT - 1)) + xp = int(x + SUBPIXEL_OFFSETS[ySub]) + mask = SUBPIXEL_DATA(1 << ySub) + yLine = y >> SUBPIXEL_SHIFT + r.MaskBuffer[yLine*r.BufferWidth+xp] |= mask + r.WindingBuffer[(yLine*r.BufferWidth+xp)*SUBPIXEL_COUNT+int(ySub)] += winding + x += slope + } +} + +//! 
Renders the mask to the canvas with non-zero winding fill. +func (r *Rasterizer8BitsSample) fillNonZero(img *image.RGBA, color *color.RGBA, clipBound [4]float64) { + var x, y uint32 + + minX := uint32(clipBound[0]) + maxX := uint32(clipBound[2]) + + minY := uint32(clipBound[1]) >> SUBPIXEL_SHIFT + maxY := uint32(clipBound[3]) >> SUBPIXEL_SHIFT + + //pixColor := (uint32(color.R) << 24) | (uint32(color.G) << 16) | (uint32(color.B) << 8) | uint32(color.A) + pixColor := (*uint32)(unsafe.Pointer(color)) + cs1 := *pixColor & 0xff00ff + cs2 := *pixColor >> 8 & 0xff00ff + + stride := uint32(img.Stride) + var mask SUBPIXEL_DATA + var n uint32 + var values [SUBPIXEL_COUNT]NON_ZERO_MASK_DATA_UNIT + for n = 0; n < SUBPIXEL_COUNT; n++ { + values[n] = 0 + } + + for y = minY; y < maxY; y++ { + tp := img.Pix[y*stride:] + + mask = 0 + for x = minX; x <= maxX; x++ { + p := (*uint32)(unsafe.Pointer(&tp[x])) + temp := r.MaskBuffer[y*uint32(r.BufferWidth)+x] + if temp != 0 { + var bit SUBPIXEL_DATA = 1 + for n = 0; n < SUBPIXEL_COUNT; n++ { + if temp&bit != 0 { + t := values[n] + values[n] += r.WindingBuffer[(y*uint32(r.BufferWidth)+x)*SUBPIXEL_COUNT+n] + if (t == 0 || values[n] == 0) && t != values[n] { + mask ^= bit + } + } + bit <<= 1 + } + } + + // 8bits + alpha := uint32(coverageTable[mask]) + // 16bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff]) + // 32bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff] + coverageTable[(mask >> 16) & 0xff] + coverageTable[(mask >> 24) & 0xff]) + + // alpha is in range of 0 to SUBPIXEL_COUNT + invAlpha := uint32(SUBPIXEL_COUNT) - alpha + + ct1 := *p & 0xff00ff * invAlpha + ct2 := *p >> 8 & 0xff00ff * invAlpha + + ct1 = (ct1 + cs1*alpha) >> SUBPIXEL_SHIFT & 0xff00ff + ct2 = (ct2 + cs2*alpha) << (8 - SUBPIXEL_SHIFT) & 0xff00ff00 + + *p = ct1 + ct2 + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV2/fillerAA.go 
b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV2/fillerAA.go new file mode 100644 index 000000000..0bda5a4db --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fillerV2/fillerAA.go @@ -0,0 +1,320 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. +// created: 27/05/2011 by Laurent Le Goff +package raster + +import ( + "image" + "image/color" + "unsafe" +) + +const ( + SUBPIXEL_SHIFT = 5 + SUBPIXEL_COUNT = 1 << SUBPIXEL_SHIFT +) + +var SUBPIXEL_OFFSETS = SUBPIXEL_OFFSETS_SAMPLE_32_FIXED + +type SUBPIXEL_DATA uint32 +type NON_ZERO_MASK_DATA_UNIT uint8 + +type Rasterizer8BitsSample struct { + MaskBuffer []SUBPIXEL_DATA + WindingBuffer []NON_ZERO_MASK_DATA_UNIT + + Width int + BufferWidth int + Height int + ClipBound [4]float64 + RemappingMatrix [6]float64 +} + +/* width and height define the maximum output size for the filler. + * The filler will output to larger bitmaps as well, but the output will + * be cropped. + */ +func NewRasterizer8BitsSample(width, height int) *Rasterizer8BitsSample { + var r Rasterizer8BitsSample + // Scale the coordinates by SUBPIXEL_COUNT in vertical direction + // The sampling point for the sub-pixel is at the top right corner. This + // adjustment moves it to the pixel center. + r.RemappingMatrix = [6]float64{1, 0, 0, SUBPIXEL_COUNT, 0.5 / SUBPIXEL_COUNT, -0.5 * SUBPIXEL_COUNT} + r.Width = width + r.Height = height + // The buffer used for filling needs to be one pixel wider than the bitmap. + // This is because the end flag that turns the fill of is the first pixel + // after the actually drawn edge. 
+ r.BufferWidth = width + 1 + + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*height) + r.WindingBuffer = make([]NON_ZERO_MASK_DATA_UNIT, r.BufferWidth*height*SUBPIXEL_COUNT) + r.ClipBound = clip(0, 0, width, height, SUBPIXEL_COUNT) + return &r +} + +func clip(x, y, width, height, scale int) [4]float64 { + var clipBound [4]float64 + + offset := 0.99 / float64(scale) + + clipBound[0] = float64(x) + offset + clipBound[2] = float64(x+width) - offset + + clipBound[1] = float64(y * scale) + clipBound[3] = float64((y + height) * scale) + return clipBound +} + +func intersect(r1, r2 [4]float64) [4]float64 { + if r1[0] < r2[0] { + r1[0] = r2[0] + } + if r1[2] > r2[2] { + r1[2] = r2[2] + } + if r1[0] > r1[2] { + r1[0] = r1[2] + } + + if r1[1] < r2[1] { + r1[1] = r2[1] + } + if r1[3] > r2[3] { + r1[3] = r2[3] + } + if r1[1] > r1[3] { + r1[1] = r1[3] + } + return r1 +} + +func (r *Rasterizer8BitsSample) RenderEvenOdd(img *image.RGBA, color *color.RGBA, polygon *Polygon, tr [6]float64) { + // memset 0 the mask buffer + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*r.Height) + + // inline matrix multiplication + transform := [6]float64{ + tr[0]*r.RemappingMatrix[0] + tr[1]*r.RemappingMatrix[2], + tr[1]*r.RemappingMatrix[3] + tr[0]*r.RemappingMatrix[1], + tr[2]*r.RemappingMatrix[0] + tr[3]*r.RemappingMatrix[2], + tr[3]*r.RemappingMatrix[3] + tr[2]*r.RemappingMatrix[1], + tr[4]*r.RemappingMatrix[0] + tr[5]*r.RemappingMatrix[2] + r.RemappingMatrix[4], + tr[5]*r.RemappingMatrix[3] + tr[4]*r.RemappingMatrix[1] + r.RemappingMatrix[5], + } + + clipRect := clip(img.Bounds().Min.X, img.Bounds().Min.Y, img.Bounds().Dx(), img.Bounds().Dy(), SUBPIXEL_COUNT) + clipRect = intersect(clipRect, r.ClipBound) + p := 0 + l := len(*polygon) / 2 + var edges [32]PolygonEdge + for p < l { + edgeCount := polygon.getEdges(p, 16, edges[:], transform, clipRect) + for k := 0; k < edgeCount; k++ { + r.addEvenOddEdge(&edges[k]) + } + p += 16 + } + + r.fillEvenOdd(img, color, clipRect) +} + +//! 
Adds an edge to be used with even-odd fill. +func (r *Rasterizer8BitsSample) addEvenOddEdge(edge *PolygonEdge) { + x := Fix(edge.X * FIXED_FLOAT_COEF) + slope := Fix(edge.Slope * FIXED_FLOAT_COEF) + slopeFix := Fix(0) + if edge.LastLine-edge.FirstLine >= SLOPE_FIX_STEP { + slopeFix = Fix(edge.Slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - slope<<SLOPE_FIX_SHIFT + } + var ySub, mask SUBPIXEL_DATA + var xp, yLine int + for y := edge.FirstLine; y <= edge.LastLine; y++ { + ySub = SUBPIXEL_DATA(y & (SUBPIXEL_COUNT - 1)) + xp = int((x + SUBPIXEL_OFFSETS[ySub]) >> FIXED_SHIFT) + mask = SUBPIXEL_DATA(1 << ySub) + yLine = y >> SUBPIXEL_SHIFT + r.MaskBuffer[yLine*r.BufferWidth+xp] ^= mask + x += slope + if y&SLOPE_FIX_MASK == 0 { + x += slopeFix + } + } +} + +//! Adds an edge to be used with non-zero winding fill. +func (r *Rasterizer8BitsSample) addNonZeroEdge(edge *PolygonEdge) { + x := Fix(edge.X * FIXED_FLOAT_COEF) + slope := Fix(edge.Slope * FIXED_FLOAT_COEF) + slopeFix := Fix(0) + if edge.LastLine-edge.FirstLine >= SLOPE_FIX_STEP { + slopeFix = Fix(edge.Slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - slope<<SLOPE_FIX_SHIFT + } + var ySub, mask SUBPIXEL_DATA + var xp, yLine int + winding := NON_ZERO_MASK_DATA_UNIT(edge.Winding) + for y := edge.FirstLine; y <= edge.LastLine; y++ { + ySub = SUBPIXEL_DATA(y & (SUBPIXEL_COUNT - 1)) + xp = int((x + SUBPIXEL_OFFSETS[ySub]) >> FIXED_SHIFT) + mask = SUBPIXEL_DATA(1 << ySub) + yLine = y >> SUBPIXEL_SHIFT + r.MaskBuffer[yLine*r.BufferWidth+xp] |= mask + r.WindingBuffer[(yLine*r.BufferWidth+xp)*SUBPIXEL_COUNT+int(ySub)] += winding + x += slope + if y&SLOPE_FIX_MASK == 0 { + x += slopeFix + } + } +} + +// Renders the mask to the canvas with even-odd fill. 
+func (r *Rasterizer8BitsSample) fillEvenOdd(img *image.RGBA, color *color.RGBA, clipBound [4]float64) { + var x, y uint32 + + minX := uint32(clipBound[0]) + maxX := uint32(clipBound[2]) + + minY := uint32(clipBound[1]) >> SUBPIXEL_SHIFT + maxY := uint32(clipBound[3]) >> SUBPIXEL_SHIFT + + //pixColor := (uint32(color.R) << 24) | (uint32(color.G) << 16) | (uint32(color.B) << 8) | uint32(color.A) + pixColor := (*uint32)(unsafe.Pointer(color)) + cs1 := *pixColor & 0xff00ff + cs2 := *pixColor >> 8 & 0xff00ff + + stride := uint32(img.Stride) + var mask SUBPIXEL_DATA + + for y = minY; y < maxY; y++ { + tp := img.Pix[y*stride:] + + mask = 0 + for x = minX; x <= maxX; x++ { + p := (*uint32)(unsafe.Pointer(&tp[x])) + mask ^= r.MaskBuffer[y*uint32(r.BufferWidth)+x] + // 8bits + //alpha := uint32(coverageTable[mask]) + // 16bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff]) + // 32bits + alpha := uint32(coverageTable[mask&0xff] + coverageTable[mask>>8&0xff] + coverageTable[mask>>16&0xff] + coverageTable[mask>>24&0xff]) + + // alpha is in range of 0 to SUBPIXEL_COUNT + invAlpha := uint32(SUBPIXEL_COUNT) - alpha + + ct1 := *p & 0xff00ff * invAlpha + ct2 := *p >> 8 & 0xff00ff * invAlpha + + ct1 = (ct1 + cs1*alpha) >> SUBPIXEL_SHIFT & 0xff00ff + ct2 = (ct2 + cs2*alpha) << (8 - SUBPIXEL_SHIFT) & 0xff00ff00 + + *p = ct1 + ct2 + } + } +} + +/* + * Renders the polygon with non-zero winding fill. + * param aTarget the target bitmap. + * param aPolygon the polygon to render. + * param aColor the color to be used for rendering. + * param aTransformation the transformation matrix. 
+ */ +func (r *Rasterizer8BitsSample) RenderNonZeroWinding(img *image.RGBA, color *color.RGBA, polygon *Polygon, tr [6]float64) { + + r.MaskBuffer = make([]SUBPIXEL_DATA, r.BufferWidth*r.Height) + r.WindingBuffer = make([]NON_ZERO_MASK_DATA_UNIT, r.BufferWidth*r.Height*SUBPIXEL_COUNT) + + // inline matrix multiplication + transform := [6]float64{ + tr[0]*r.RemappingMatrix[0] + tr[1]*r.RemappingMatrix[2], + tr[1]*r.RemappingMatrix[3] + tr[0]*r.RemappingMatrix[1], + tr[2]*r.RemappingMatrix[0] + tr[3]*r.RemappingMatrix[2], + tr[3]*r.RemappingMatrix[3] + tr[2]*r.RemappingMatrix[1], + tr[4]*r.RemappingMatrix[0] + tr[5]*r.RemappingMatrix[2] + r.RemappingMatrix[4], + tr[5]*r.RemappingMatrix[3] + tr[4]*r.RemappingMatrix[1] + r.RemappingMatrix[5], + } + + clipRect := clip(img.Bounds().Min.X, img.Bounds().Min.Y, img.Bounds().Dx(), img.Bounds().Dy(), SUBPIXEL_COUNT) + clipRect = intersect(clipRect, r.ClipBound) + + p := 0 + l := len(*polygon) / 2 + var edges [32]PolygonEdge + for p < l { + edgeCount := polygon.getEdges(p, 16, edges[:], transform, clipRect) + for k := 0; k < edgeCount; k++ { + r.addNonZeroEdge(&edges[k]) + } + p += 16 + } + + r.fillNonZero(img, color, clipRect) +} + +//! Renders the mask to the canvas with non-zero winding fill. 
+func (r *Rasterizer8BitsSample) fillNonZero(img *image.RGBA, color *color.RGBA, clipBound [4]float64) { + var x, y uint32 + + minX := uint32(clipBound[0]) + maxX := uint32(clipBound[2]) + + minY := uint32(clipBound[1]) >> SUBPIXEL_SHIFT + maxY := uint32(clipBound[3]) >> SUBPIXEL_SHIFT + + //pixColor := (uint32(color.R) << 24) | (uint32(color.G) << 16) | (uint32(color.B) << 8) | uint32(color.A) + pixColor := (*uint32)(unsafe.Pointer(color)) + cs1 := *pixColor & 0xff00ff + cs2 := *pixColor >> 8 & 0xff00ff + + stride := uint32(img.Stride) + var mask SUBPIXEL_DATA + var n uint32 + var values [SUBPIXEL_COUNT]NON_ZERO_MASK_DATA_UNIT + for n = 0; n < SUBPIXEL_COUNT; n++ { + values[n] = 0 + } + + for y = minY; y < maxY; y++ { + tp := img.Pix[y*stride:] + + mask = 0 + for x = minX; x <= maxX; x++ { + p := (*uint32)(unsafe.Pointer(&tp[x])) + temp := r.MaskBuffer[y*uint32(r.BufferWidth)+x] + if temp != 0 { + var bit SUBPIXEL_DATA = 1 + for n = 0; n < SUBPIXEL_COUNT; n++ { + if temp&bit != 0 { + t := values[n] + values[n] += r.WindingBuffer[(y*uint32(r.BufferWidth)+x)*SUBPIXEL_COUNT+n] + if (t == 0 || values[n] == 0) && t != values[n] { + mask ^= bit + } + } + bit <<= 1 + } + } + + // 8bits + //alpha := uint32(coverageTable[mask]) + // 16bits + //alpha := uint32(coverageTable[mask & 0xff] + coverageTable[(mask >> 8) & 0xff]) + // 32bits + alpha := uint32(coverageTable[mask&0xff] + coverageTable[mask>>8&0xff] + coverageTable[mask>>16&0xff] + coverageTable[mask>>24&0xff]) + + // alpha is in range of 0 to SUBPIXEL_COUNT + invAlpha := uint32(SUBPIXEL_COUNT) - alpha + + ct1 := *p & 0xff00ff * invAlpha + ct2 := *p >> 8 & 0xff00ff * invAlpha + + ct1 = (ct1 + cs1*alpha) >> SUBPIXEL_SHIFT & 0xff00ff + ct2 = (ct2 + cs2*alpha) << (8 - SUBPIXEL_SHIFT) & 0xff00ff00 + + *p = ct1 + ct2 + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fixed_point.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fixed_point.go new file mode 100644 index 
000000000..14b8419c3 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/fixed_point.go @@ -0,0 +1,17 @@ +package raster + +type Fix int32 + +const ( + FIXED_SHIFT = 16 + FIXED_FLOAT_COEF = 1 << FIXED_SHIFT +) + +/*! Fixed point math inevitably introduces rounding error to the DDA. The error is + * fixed every now and then by a separate fix value. The defines below set these. + */ +const ( + SLOPE_FIX_SHIFT = 8 + SLOPE_FIX_STEP = 1 << SLOPE_FIX_SHIFT + SLOPE_FIX_MASK = SLOPE_FIX_STEP - 1 +) diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/line.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/line.go new file mode 100644 index 000000000..6f6d8863f --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/line.go @@ -0,0 +1,55 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. +// created: 27/05/2011 by Laurent Le Goff +package raster + +import ( + "image/color" + "image/draw" +) + +func abs(i int) int { + if i < 0 { + return -i + } + return i +} + +func PolylineBresenham(img draw.Image, c color.Color, s ...float64) { + for i := 2; i < len(s); i += 2 { + Bresenham(img, c, int(s[i-2]+0.5), int(s[i-1]+0.5), int(s[i]+0.5), int(s[i+1]+0.5)) + } +} + +func Bresenham(img draw.Image, color color.Color, x0, y0, x1, y1 int) { + dx := abs(x1 - x0) + dy := abs(y1 - y0) + var sx, sy int + if x0 < x1 { + sx = 1 + } else { + sx = -1 + } + if y0 < y1 { + sy = 1 + } else { + sy = -1 + } + err := dx - dy + + var e2 int + for { + img.Set(x0, y0, color) + if x0 == x1 && y0 == y1 { + return + } + e2 = 2 * err + if e2 > -dy { + err = err - dy + x0 = x0 + sx + } + if e2 < dx { + err = err + dx + y0 = y0 + sy + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/polygon.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/polygon.go new file mode 100644 index 000000000..2a19e7355 --- /dev/null +++ 
b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/raster/polygon.go @@ -0,0 +1,581 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. +// created: 27/05/2011 by Laurent Le Goff +package raster + +const ( + POLYGON_CLIP_NONE = iota + POLYGON_CLIP_LEFT + POLYGON_CLIP_RIGHT + POLYGON_CLIP_TOP + POLYGON_CLIP_BOTTOM +) + +type Polygon []float64 + +type PolygonEdge struct { + X, Slope float64 + FirstLine, LastLine int + Winding int16 +} + +//! A more optimized representation of a polygon edge. +type PolygonScanEdge struct { + FirstLine, LastLine int + Winding int16 + X Fix + Slope Fix + SlopeFix Fix + NextEdge *PolygonScanEdge +} + +//! Calculates the edges of the polygon with transformation and clipping to edges array. +/*! \param startIndex the index for the first vertex. + * \param vertexCount the amount of vertices to convert. + * \param edges the array for result edges. This should be able to contain 2*aVertexCount edges. + * \param tr the transformation matrix for the polygon. + * \param aClipRectangle the clip rectangle. + * \return the amount of edges in the result. + */ +func (p Polygon) getEdges(startIndex, vertexCount int, edges []PolygonEdge, tr [6]float64, clipBound [4]float64) int { + startIndex = startIndex * 2 + endIndex := startIndex + vertexCount*2 + if endIndex > len(p) { + endIndex = len(p) + } + + x := p[startIndex] + y := p[startIndex+1] + // inline transformation + prevX := x*tr[0] + y*tr[2] + tr[4] + prevY := x*tr[1] + y*tr[3] + tr[5] + + //! Calculates the clip flags for a point. 
+ prevClipFlags := POLYGON_CLIP_NONE + if prevX < clipBound[0] { + prevClipFlags |= POLYGON_CLIP_LEFT + } else if prevX >= clipBound[2] { + prevClipFlags |= POLYGON_CLIP_RIGHT + } + + if prevY < clipBound[1] { + prevClipFlags |= POLYGON_CLIP_TOP + } else if prevY >= clipBound[3] { + prevClipFlags |= POLYGON_CLIP_BOTTOM + } + + edgeCount := 0 + var k, clipFlags, clipSum, clipUnion int + var xleft, yleft, xright, yright, oldY, maxX, minX float64 + var swapWinding int16 + for n := startIndex; n < endIndex; n = n + 2 { + k = (n + 2) % len(p) + x = p[k]*tr[0] + p[k+1]*tr[2] + tr[4] + y = p[k]*tr[1] + p[k+1]*tr[3] + tr[5] + + //! Calculates the clip flags for a point. + clipFlags = POLYGON_CLIP_NONE + if prevX < clipBound[0] { + clipFlags |= POLYGON_CLIP_LEFT + } else if prevX >= clipBound[2] { + clipFlags |= POLYGON_CLIP_RIGHT + } + if prevY < clipBound[1] { + clipFlags |= POLYGON_CLIP_TOP + } else if prevY >= clipBound[3] { + clipFlags |= POLYGON_CLIP_BOTTOM + } + + clipSum = prevClipFlags | clipFlags + clipUnion = prevClipFlags & clipFlags + + // Skip all edges that are either completely outside at the top or at the bottom. + if clipUnion&(POLYGON_CLIP_TOP|POLYGON_CLIP_BOTTOM) == 0 { + if clipUnion&POLYGON_CLIP_RIGHT != 0 { + // Both clip to right, edge is a vertical line on the right side + if getVerticalEdge(prevY, y, clipBound[2], &edges[edgeCount], clipBound) { + edgeCount++ + } + } else if clipUnion&POLYGON_CLIP_LEFT != 0 { + // Both clip to left, edge is a vertical line on the left side + if getVerticalEdge(prevY, y, clipBound[0], &edges[edgeCount], clipBound) { + edgeCount++ + } + } else if clipSum&(POLYGON_CLIP_RIGHT|POLYGON_CLIP_LEFT) == 0 { + // No clipping in the horizontal direction + if getEdge(prevX, prevY, x, y, &edges[edgeCount], clipBound) { + edgeCount++ + } + } else { + // Clips to left or right or both. 
+ + if x < prevX { + xleft, yleft = x, y + xright, yright = prevX, prevY + swapWinding = -1 + } else { + xleft, yleft = prevX, prevY + xright, yright = x, y + swapWinding = 1 + } + + slope := (yright - yleft) / (xright - xleft) + + if clipSum&POLYGON_CLIP_RIGHT != 0 { + // calculate new position for the right vertex + oldY = yright + maxX = clipBound[2] + + yright = yleft + (maxX-xleft)*slope + xright = maxX + + // add vertical edge for the overflowing part + if getVerticalEdge(yright, oldY, maxX, &edges[edgeCount], clipBound) { + edges[edgeCount].Winding *= swapWinding + edgeCount++ + } + } + + if clipSum&POLYGON_CLIP_LEFT != 0 { + // calculate new position for the left vertex + oldY = yleft + minX = clipBound[0] + + yleft = yleft + (minX-xleft)*slope + xleft = minX + + // add vertical edge for the overflowing part + if getVerticalEdge(oldY, yleft, minX, &edges[edgeCount], clipBound) { + edges[edgeCount].Winding *= swapWinding + edgeCount++ + } + } + + if getEdge(xleft, yleft, xright, yright, &edges[edgeCount], clipBound) { + edges[edgeCount].Winding *= swapWinding + edgeCount++ + } + } + } + + prevClipFlags = clipFlags + prevX = x + prevY = y + } + + return edgeCount +} + +//! Creates a polygon edge between two vectors. +/*! Clips the edge vertically to the clip rectangle. Returns true for edges that + * should be rendered, false for others. + */ +func getEdge(x0, y0, x1, y1 float64, edge *PolygonEdge, clipBound [4]float64) bool { + var startX, startY, endX, endY float64 + var winding int16 + + if y0 <= y1 { + startX = x0 + startY = y0 + endX = x1 + endY = y1 + winding = 1 + } else { + startX = x1 + startY = y1 + endX = x0 + endY = y0 + winding = -1 + } + + // Essentially, firstLine is floor(startY + 1) and lastLine is floor(endY). + // These are refactored to integer casts in order to avoid function + // calls. The difference with integer cast is that numbers are always + // rounded towards zero. 
Since values smaller than zero get clipped away, + // only coordinates between 0 and -1 require greater attention as they + // also round to zero. The problems in this range can be avoided by + // adding one to the values before conversion and subtracting after it. + + firstLine := int(startY + 1) + lastLine := int(endY+1) - 1 + + minClip := int(clipBound[1]) + maxClip := int(clipBound[3]) + + // If start and end are on the same line, the edge doesn't cross + // any lines and thus can be ignored. + // If the end is smaller than the first line, edge is out. + // If the start is larger than the last line, edge is out. + if firstLine > lastLine || lastLine < minClip || firstLine >= maxClip { + return false + } + + // Adjust the start based on the target. + if firstLine < minClip { + firstLine = minClip + } + + if lastLine >= maxClip { + lastLine = maxClip - 1 + } + edge.Slope = (endX - startX) / (endY - startY) + edge.X = startX + (float64(firstLine)-startY)*edge.Slope + edge.Winding = winding + edge.FirstLine = firstLine + edge.LastLine = lastLine + + return true +} + +//! Creates a vertical polygon edge between two y values. +/*! Clips the edge vertically to the clip rectangle. Returns true for edges that + * should be rendered, false for others. + */ +func getVerticalEdge(startY, endY, x float64, edge *PolygonEdge, clipBound [4]float64) bool { + var start, end float64 + var winding int16 + if startY < endY { + start = startY + end = endY + winding = 1 + } else { + start = endY + end = startY + winding = -1 + } + + firstLine := int(start + 1) + lastLine := int(end+1) - 1 + + minClip := int(clipBound[1]) + maxClip := int(clipBound[3]) + + // If start and end are on the same line, the edge doesn't cross + // any lines and thus can be ignored. + // If the end is smaller than the first line, edge is out. + // If the start is larger than the last line, edge is out. 
+ if firstLine > lastLine || lastLine < minClip || firstLine >= maxClip { + return false + } + + // Adjust the start based on the clip rect. + if firstLine < minClip { + firstLine = minClip + } + if lastLine >= maxClip { + lastLine = maxClip - 1 + } + + edge.Slope = 0 + edge.X = x + edge.Winding = winding + edge.FirstLine = firstLine + edge.LastLine = lastLine + + return true +} + +type VertexData struct { + X, Y float64 + ClipFlags int + Line int +} + +//! Calculates the edges of the polygon with transformation and clipping to edges array. +/*! Note that this may return upto three times the amount of edges that the polygon has vertices, + * in the unlucky case where both left and right side get clipped for all edges. + * \param edges the array for result edges. This should be able to contain 2*aVertexCount edges. + * \param aTransformation the transformation matrix for the polygon. + * \param aClipRectangle the clip rectangle. + * \return the amount of edges in the result. + */ +func (p Polygon) getScanEdges(edges []PolygonScanEdge, tr [6]float64, clipBound [4]float64) int { + var n int + vertexData := make([]VertexData, len(p)/2+1) + for n = 0; n < len(vertexData)-1; n = n + 1 { + k := n * 2 + vertexData[n].X = p[k]*tr[0] + p[k+1]*tr[2] + tr[4] + vertexData[n].Y = p[k]*tr[1] + p[k+1]*tr[3] + tr[5] + // Calculate clip flags for all vertices. + vertexData[n].ClipFlags = POLYGON_CLIP_NONE + if vertexData[n].X < clipBound[0] { + vertexData[n].ClipFlags |= POLYGON_CLIP_LEFT + } else if vertexData[n].X >= clipBound[2] { + vertexData[n].ClipFlags |= POLYGON_CLIP_RIGHT + } + if vertexData[n].Y < clipBound[1] { + vertexData[n].ClipFlags |= POLYGON_CLIP_TOP + } else if vertexData[n].Y >= clipBound[3] { + vertexData[n].ClipFlags |= POLYGON_CLIP_BOTTOM + } + + // Calculate line of the vertex. If the vertex is clipped by top or bottom, the line + // is determined by the clip rectangle. 
+ if vertexData[n].ClipFlags&POLYGON_CLIP_TOP != 0 { + vertexData[n].Line = int(clipBound[1]) + } else if vertexData[n].ClipFlags&POLYGON_CLIP_BOTTOM != 0 { + vertexData[n].Line = int(clipBound[3] - 1) + } else { + vertexData[n].Line = int(vertexData[n].Y+1) - 1 + } + } + + // Copy the data from 0 to the last entry to make the data to loop. + vertexData[len(vertexData)-1] = vertexData[0] + + // Transform the first vertex; store. + // Process mVertexCount - 1 times, next is n+1 + // copy the first vertex to + // Process 1 time, next is n + + edgeCount := 0 + for n = 0; n < len(vertexData)-1; n++ { + clipSum := vertexData[n].ClipFlags | vertexData[n+1].ClipFlags + clipUnion := vertexData[n].ClipFlags & vertexData[n+1].ClipFlags + + if clipUnion&(POLYGON_CLIP_TOP|POLYGON_CLIP_BOTTOM) == 0 && + vertexData[n].Line != vertexData[n+1].Line { + var startIndex, endIndex int + var winding int16 + if vertexData[n].Y < vertexData[n+1].Y { + startIndex = n + endIndex = n + 1 + winding = 1 + } else { + startIndex = n + 1 + endIndex = n + winding = -1 + } + + firstLine := vertexData[startIndex].Line + 1 + lastLine := vertexData[endIndex].Line + + if clipUnion&POLYGON_CLIP_RIGHT != 0 { + // Both clip to right, edge is a vertical line on the right side + edges[edgeCount].FirstLine = firstLine + edges[edgeCount].LastLine = lastLine + edges[edgeCount].Winding = winding + edges[edgeCount].X = Fix(clipBound[2] * FIXED_FLOAT_COEF) + edges[edgeCount].Slope = 0 + edges[edgeCount].SlopeFix = 0 + + edgeCount++ + } else if clipUnion&POLYGON_CLIP_LEFT != 0 { + // Both clip to left, edge is a vertical line on the left side + edges[edgeCount].FirstLine = firstLine + edges[edgeCount].LastLine = lastLine + edges[edgeCount].Winding = winding + edges[edgeCount].X = Fix(clipBound[0] * FIXED_FLOAT_COEF) + edges[edgeCount].Slope = 0 + edges[edgeCount].SlopeFix = 0 + + edgeCount++ + } else if clipSum&(POLYGON_CLIP_RIGHT|POLYGON_CLIP_LEFT) == 0 { + // No clipping in the horizontal direction + slope := 
(vertexData[endIndex].X - + vertexData[startIndex].X) / + (vertexData[endIndex].Y - + vertexData[startIndex].Y) + + // If there is vertical clip (for the top) it will be processed here. The calculation + // should be done for all non-clipping edges as well to determine the accurate position + // where the edge crosses the first scanline. + startx := vertexData[startIndex].X + + (float64(firstLine)-vertexData[startIndex].Y)*slope + + edges[edgeCount].FirstLine = firstLine + edges[edgeCount].LastLine = lastLine + edges[edgeCount].Winding = winding + edges[edgeCount].X = Fix(startx * FIXED_FLOAT_COEF) + edges[edgeCount].Slope = Fix(slope * FIXED_FLOAT_COEF) + + if lastLine-firstLine >= SLOPE_FIX_STEP { + edges[edgeCount].SlopeFix = Fix(slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - + edges[edgeCount].Slope< clipBound[3] { + clipVertices[p].ClipFlags = POLYGON_CLIP_BOTTOM + clipVertices[p].Line = int(clipBound[3] - 1) + } else { + clipVertices[p].ClipFlags = 0 + clipVertices[p].Line = int(clipVertices[p].Y+1) - 1 + } + } else { + clipVertices[p].ClipFlags = 0 + clipVertices[p].Line = int(clipVertices[p].Y+1) - 1 + } + } + } + + // Now there are three or four vertices, in the top-to-bottom order of start, clip0, clip1, + // end. What kind of edges are required for connecting these can be determined from the + // clip flags. + // -if clip vertex has horizontal clip flags, it doesn't exist. No edge is generated. + // -if start vertex or end vertex has horizontal clip flag, the edge to/from the clip vertex is vertical + // -if the line of two vertices is the same, the edge is not generated, since the edge doesn't + // cross any scanlines. 
+ + // The alternative patterns are: + // start - clip0 - clip1 - end + // start - clip0 - end + // start - clip1 - end + + var topClipIndex, bottomClipIndex int + if (clipVertices[0].ClipFlags|clipVertices[1].ClipFlags)& + (POLYGON_CLIP_LEFT|POLYGON_CLIP_RIGHT) == 0 { + // Both sides are clipped, the order is start-clip0-clip1-end + topClipIndex = 0 + bottomClipIndex = 1 + + // Add the edge from clip0 to clip1 + // Check that the line is different for the vertices. + if clipVertices[0].Line != clipVertices[1].Line { + firstClipLine := clipVertices[0].Line + 1 + + startx := vertexData[startIndex].X + + (float64(firstClipLine)-vertexData[startIndex].Y)*slope + + edges[edgeCount].X = Fix(startx * FIXED_FLOAT_COEF) + edges[edgeCount].Slope = Fix(slope * FIXED_FLOAT_COEF) + edges[edgeCount].FirstLine = firstClipLine + edges[edgeCount].LastLine = clipVertices[1].Line + edges[edgeCount].Winding = winding + + if edges[edgeCount].LastLine-edges[edgeCount].FirstLine >= SLOPE_FIX_STEP { + edges[edgeCount].SlopeFix = Fix(slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - + edges[edgeCount].Slope<= SLOPE_FIX_STEP { + edges[edgeCount].SlopeFix = Fix(slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - + edges[edgeCount].Slope<= SLOPE_FIX_STEP { + edges[edgeCount].SlopeFix = Fix(slope*SLOPE_FIX_STEP*FIXED_FLOAT_COEF) - + edges[edgeCount].Slope< cap(p.points) { + points := make([]float64, len(p.points)+2, len(p.points)+32) + copy(points, p.points) + p.points = points + } else { + p.points = p.points[0 : len(p.points)+2] + } + p.points[len(p.points)-2] = x + p.points[len(p.points)-1] = y +} + +func TestFreetype(t *testing.T) { + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + rasterizer := raster.NewRasterizer(200, 200) + rasterizer.UseNonZeroWinding = false + 
rasterizer.Start(raster.Point{raster.Fix32(10 * 256), raster.Fix32(190 * 256)}) + for j := 0; j < len(poly); j = j + 2 { + rasterizer.Add1(raster.Point{raster.Fix32(poly[j] * 256), raster.Fix32(poly[j+1] * 256)}) + } + painter := raster.NewRGBAPainter(img) + painter.SetColor(color) + rasterizer.Rasterize(painter) + + savepng("_testFreetype.png", img) +} + +func TestFreetypeNonZeroWinding(t *testing.T) { + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + rasterizer := raster.NewRasterizer(200, 200) + rasterizer.UseNonZeroWinding = true + rasterizer.Start(raster.Point{raster.Fix32(10 * 256), raster.Fix32(190 * 256)}) + for j := 0; j < len(poly); j = j + 2 { + rasterizer.Add1(raster.Point{raster.Fix32(poly[j] * 256), raster.Fix32(poly[j+1] * 256)}) + } + painter := raster.NewRGBAPainter(img) + painter.SetColor(color) + rasterizer.Rasterize(painter) + + savepng("_testFreetypeNonZeroWinding.png", img) +} + +func TestRasterizer(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + tr := [6]float64{1, 0, 0, 1, 0, 0} + r := NewRasterizer8BitsSample(200, 200) + //PolylineBresenham(img, image.Black, poly...) 
+ + r.RenderEvenOdd(img, &color, &poly, tr) + savepng("_testRasterizer.png", img) +} + +func TestRasterizerNonZeroWinding(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + tr := [6]float64{1, 0, 0, 1, 0, 0} + r := NewRasterizer8BitsSample(200, 200) + //PolylineBresenham(img, image.Black, poly...) + + r.RenderNonZeroWinding(img, &color, &poly, tr) + savepng("_testRasterizerNonZeroWinding.png", img) +} + +func BenchmarkFreetype(b *testing.B) { + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + + for i := 0; i < b.N; i++ { + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + rasterizer := raster.NewRasterizer(200, 200) + rasterizer.UseNonZeroWinding = false + rasterizer.Start(raster.Point{raster.Fix32(10 * 256), raster.Fix32(190 * 256)}) + for j := 0; j < len(poly); j = j + 2 { + rasterizer.Add1(raster.Point{raster.Fix32(poly[j] * 256), raster.Fix32(poly[j+1] * 256)}) + } + painter := raster.NewRGBAPainter(img) + painter.SetColor(color) + rasterizer.Rasterize(painter) + } +} +func BenchmarkFreetypeNonZeroWinding(b *testing.B) { + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + + for i := 0; i < b.N; i++ { + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + rasterizer := raster.NewRasterizer(200, 200) + rasterizer.UseNonZeroWinding = true + rasterizer.Start(raster.Point{raster.Fix32(10 * 256), raster.Fix32(190 * 256)}) + for j := 0; j < len(poly); j = j + 2 { + rasterizer.Add1(raster.Point{raster.Fix32(poly[j] * 256), raster.Fix32(poly[j+1] * 
256)}) + } + painter := raster.NewRGBAPainter(img) + painter.SetColor(color) + rasterizer.Rasterize(painter) + } +} + +func BenchmarkRasterizerNonZeroWinding(b *testing.B) { + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + tr := [6]float64{1, 0, 0, 1, 0, 0} + for i := 0; i < b.N; i++ { + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + rasterizer := NewRasterizer8BitsSample(200, 200) + rasterizer.RenderNonZeroWinding(img, &color, &poly, tr) + } +} + +func BenchmarkRasterizer(b *testing.B) { + var p Path + p.LineTo(10, 190) + c := curve.CubicCurveFloat64{10, 190, 10, 10, 190, 10, 190, 190} + c.Segment(&p, flattening_threshold) + poly := Polygon(p.points) + color := color.RGBA{0, 0, 0, 0xff} + tr := [6]float64{1, 0, 0, 1, 0, 0} + for i := 0; i < b.N; i++ { + img := image.NewRGBA(image.Rect(0, 0, 200, 200)) + rasterizer := NewRasterizer8BitsSample(200, 200) + rasterizer.RenderEvenOdd(img, &color, &poly, tr) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/rgba_interpolation.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/rgba_interpolation.go new file mode 100644 index 000000000..92534e7eb --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/rgba_interpolation.go @@ -0,0 +1,150 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 21/11/2010 by Laurent Le Goff +// see http://pippin.gimp.org/image_processing/chap_resampling.html + +package draw2d + +import ( + "image" + "image/color" + "image/draw" + "math" +) + +type ImageFilter int + +const ( + LinearFilter ImageFilter = iota + BilinearFilter + BicubicFilter +) + +//see http://pippin.gimp.org/image_processing/chap_resampling.html +func getColorLinear(img image.Image, x, y float64) color.Color { + return img.At(int(x), int(y)) +} + +func getColorBilinear(img image.Image, x, y float64) color.Color { + x0 := math.Floor(x) + y0 := math.Floor(y) + dx := x - x0 + dy := y - y0 + + rt, gt, bt, at := img.At(int(x0), int(y0)).RGBA() + r0, g0, b0, a0 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = img.At(int(x0+1), int(y0)).RGBA() + r1, g1, b1, a1 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = img.At(int(x0+1), int(y0+1)).RGBA() + r2, g2, b2, a2 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = img.At(int(x0), int(y0+1)).RGBA() + r3, g3, b3, a3 := float64(rt), float64(gt), float64(bt), float64(at) + + r := int(lerp(lerp(r0, r1, dx), lerp(r3, r2, dx), dy)) + g := int(lerp(lerp(g0, g1, dx), lerp(g3, g2, dx), dy)) + b := int(lerp(lerp(b0, b1, dx), lerp(b3, b2, dx), dy)) + a := int(lerp(lerp(a0, a1, dx), lerp(a3, a2, dx), dy)) + return color.RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} +} + +/** +-- LERP +-- /lerp/, vi.,n. +-- +-- Quasi-acronym for Linear Interpolation, used as a verb or noun for +-- the operation. "Bresenham's algorithm lerps incrementally between the +-- two endpoints of the line." 
(From Jargon File (4.4.4, 14 Aug 2003) +*/ +func lerp(v1, v2, ratio float64) float64 { + return v1*(1-ratio) + v2*ratio +} + +func getColorCubicRow(img image.Image, x, y, offset float64) color.Color { + c0 := img.At(int(x), int(y)) + c1 := img.At(int(x+1), int(y)) + c2 := img.At(int(x+2), int(y)) + c3 := img.At(int(x+3), int(y)) + rt, gt, bt, at := c0.RGBA() + r0, g0, b0, a0 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = c1.RGBA() + r1, g1, b1, a1 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = c2.RGBA() + r2, g2, b2, a2 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = c3.RGBA() + r3, g3, b3, a3 := float64(rt), float64(gt), float64(bt), float64(at) + r, g, b, a := cubic(offset, r0, r1, r2, r3), cubic(offset, g0, g1, g2, g3), cubic(offset, b0, b1, b2, b3), cubic(offset, a0, a1, a2, a3) + return color.RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} +} + +func getColorBicubic(img image.Image, x, y float64) color.Color { + x0 := math.Floor(x) + y0 := math.Floor(y) + dx := x - x0 + dy := y - y0 + c0 := getColorCubicRow(img, x0-1, y0-1, dx) + c1 := getColorCubicRow(img, x0-1, y0, dx) + c2 := getColorCubicRow(img, x0-1, y0+1, dx) + c3 := getColorCubicRow(img, x0-1, y0+2, dx) + rt, gt, bt, at := c0.RGBA() + r0, g0, b0, a0 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = c1.RGBA() + r1, g1, b1, a1 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = c2.RGBA() + r2, g2, b2, a2 := float64(rt), float64(gt), float64(bt), float64(at) + rt, gt, bt, at = c3.RGBA() + r3, g3, b3, a3 := float64(rt), float64(gt), float64(bt), float64(at) + r, g, b, a := cubic(dy, r0, r1, r2, r3), cubic(dy, g0, g1, g2, g3), cubic(dy, b0, b1, b2, b3), cubic(dy, a0, a1, a2, a3) + return color.RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} +} + +func cubic(offset, v0, v1, v2, v3 float64) uint32 { + // offset is the offset of the sampled value between v1 
and v2 + return uint32(((((-7*v0+21*v1-21*v2+7*v3)*offset+ + (15*v0-36*v1+27*v2-6*v3))*offset+ + (-9*v0+9*v2))*offset + (v0 + 16*v1 + v2)) / 18.0) +} + +func DrawImage(src image.Image, dest draw.Image, tr MatrixTransform, op draw.Op, filter ImageFilter) { + bounds := src.Bounds() + x0, y0, x1, y1 := float64(bounds.Min.X), float64(bounds.Min.Y), float64(bounds.Max.X), float64(bounds.Max.Y) + tr.TransformRectangle(&x0, &y0, &x1, &y1) + var x, y, u, v float64 + var c1, c2, cr color.Color + var r, g, b, a, ia, r1, g1, b1, a1, r2, g2, b2, a2 uint32 + var color color.RGBA + for x = x0; x < x1; x++ { + for y = y0; y < y1; y++ { + u = x + v = y + tr.InverseTransform(&u, &v) + if bounds.Min.X <= int(u) && bounds.Max.X > int(u) && bounds.Min.Y <= int(v) && bounds.Max.Y > int(v) { + c1 = dest.At(int(x), int(y)) + switch filter { + case LinearFilter: + c2 = src.At(int(u), int(v)) + case BilinearFilter: + c2 = getColorBilinear(src, u, v) + case BicubicFilter: + c2 = getColorBicubic(src, u, v) + } + switch op { + case draw.Over: + r1, g1, b1, a1 = c1.RGBA() + r2, g2, b2, a2 = c2.RGBA() + ia = M - a2 + r = ((r1 * ia) / M) + r2 + g = ((g1 * ia) / M) + g2 + b = ((b1 * ia) / M) + b2 + a = ((a1 * ia) / M) + a2 + color.R = uint8(r >> 8) + color.G = uint8(g >> 8) + color.B = uint8(b >> 8) + color.A = uint8(a >> 8) + cr = color + default: + cr = c2 + } + dest.Set(int(x), int(y), cr) + } + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stack_gc.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stack_gc.go new file mode 100644 index 000000000..b2cf63fc4 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stack_gc.go @@ -0,0 +1,208 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "code.google.com/p/freetype-go/freetype/truetype" + "image" + "image/color" +) + +type StackGraphicContext struct { + Current *ContextStack +} + +type ContextStack struct { + Tr MatrixTransform + Path *PathStorage + LineWidth float64 + Dash []float64 + DashOffset float64 + StrokeColor color.Color + FillColor color.Color + FillRule FillRule + Cap Cap + Join Join + FontSize float64 + FontData FontData + + font *truetype.Font + // fontSize and dpi are used to calculate scale. scale is the number of + // 26.6 fixed point units in 1 em. + scale int32 + + previous *ContextStack +} + +/** + * Create a new Graphic context from an image + */ +func NewStackGraphicContext() *StackGraphicContext { + gc := &StackGraphicContext{} + gc.Current = new(ContextStack) + gc.Current.Tr = NewIdentityMatrix() + gc.Current.Path = NewPathStorage() + gc.Current.LineWidth = 1.0 + gc.Current.StrokeColor = image.Black + gc.Current.FillColor = image.White + gc.Current.Cap = RoundCap + gc.Current.FillRule = FillRuleEvenOdd + gc.Current.Join = RoundJoin + gc.Current.FontSize = 10 + gc.Current.FontData = defaultFontData + return gc +} + +func (gc *StackGraphicContext) GetMatrixTransform() MatrixTransform { + return gc.Current.Tr +} + +func (gc *StackGraphicContext) SetMatrixTransform(Tr MatrixTransform) { + gc.Current.Tr = Tr +} + +func (gc *StackGraphicContext) ComposeMatrixTransform(Tr MatrixTransform) { + gc.Current.Tr = Tr.Multiply(gc.Current.Tr) +} + +func (gc *StackGraphicContext) Rotate(angle float64) { + gc.Current.Tr = NewRotationMatrix(angle).Multiply(gc.Current.Tr) +} + +func (gc *StackGraphicContext) Translate(tx, ty float64) { + gc.Current.Tr = NewTranslationMatrix(tx, ty).Multiply(gc.Current.Tr) +} + +func (gc *StackGraphicContext) Scale(sx, sy float64) { + gc.Current.Tr = NewScaleMatrix(sx, sy).Multiply(gc.Current.Tr) +} + +func (gc *StackGraphicContext) SetStrokeColor(c color.Color) { + gc.Current.StrokeColor 
= c +} + +func (gc *StackGraphicContext) SetFillColor(c color.Color) { + gc.Current.FillColor = c +} + +func (gc *StackGraphicContext) SetFillRule(f FillRule) { + gc.Current.FillRule = f +} + +func (gc *StackGraphicContext) SetLineWidth(LineWidth float64) { + gc.Current.LineWidth = LineWidth +} + +func (gc *StackGraphicContext) SetLineCap(Cap Cap) { + gc.Current.Cap = Cap +} + +func (gc *StackGraphicContext) SetLineJoin(Join Join) { + gc.Current.Join = Join +} + +func (gc *StackGraphicContext) SetLineDash(Dash []float64, DashOffset float64) { + gc.Current.Dash = Dash + gc.Current.DashOffset = DashOffset +} + +func (gc *StackGraphicContext) SetFontSize(FontSize float64) { + gc.Current.FontSize = FontSize +} + +func (gc *StackGraphicContext) GetFontSize() float64 { + return gc.Current.FontSize +} + +func (gc *StackGraphicContext) SetFontData(FontData FontData) { + gc.Current.FontData = FontData +} + +func (gc *StackGraphicContext) GetFontData() FontData { + return gc.Current.FontData +} + +func (gc *StackGraphicContext) BeginPath() { + gc.Current.Path.Clear() +} + +func (gc *StackGraphicContext) IsEmpty() bool { + return gc.Current.Path.IsEmpty() +} + +func (gc *StackGraphicContext) LastPoint() (float64, float64) { + return gc.Current.Path.LastPoint() +} + +func (gc *StackGraphicContext) MoveTo(x, y float64) { + gc.Current.Path.MoveTo(x, y) +} + +func (gc *StackGraphicContext) RMoveTo(dx, dy float64) { + gc.Current.Path.RMoveTo(dx, dy) +} + +func (gc *StackGraphicContext) LineTo(x, y float64) { + gc.Current.Path.LineTo(x, y) +} + +func (gc *StackGraphicContext) RLineTo(dx, dy float64) { + gc.Current.Path.RLineTo(dx, dy) +} + +func (gc *StackGraphicContext) QuadCurveTo(cx, cy, x, y float64) { + gc.Current.Path.QuadCurveTo(cx, cy, x, y) +} + +func (gc *StackGraphicContext) RQuadCurveTo(dcx, dcy, dx, dy float64) { + gc.Current.Path.RQuadCurveTo(dcx, dcy, dx, dy) +} + +func (gc *StackGraphicContext) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) { + 
gc.Current.Path.CubicCurveTo(cx1, cy1, cx2, cy2, x, y) +} + +func (gc *StackGraphicContext) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) { + gc.Current.Path.RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy) +} + +func (gc *StackGraphicContext) ArcTo(cx, cy, rx, ry, startAngle, angle float64) { + gc.Current.Path.ArcTo(cx, cy, rx, ry, startAngle, angle) +} + +func (gc *StackGraphicContext) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) { + gc.Current.Path.RArcTo(dcx, dcy, rx, ry, startAngle, angle) +} + +func (gc *StackGraphicContext) Close() { + gc.Current.Path.Close() +} + +func (gc *StackGraphicContext) Save() { + context := new(ContextStack) + context.FontSize = gc.Current.FontSize + context.FontData = gc.Current.FontData + context.LineWidth = gc.Current.LineWidth + context.StrokeColor = gc.Current.StrokeColor + context.FillColor = gc.Current.FillColor + context.FillRule = gc.Current.FillRule + context.Dash = gc.Current.Dash + context.DashOffset = gc.Current.DashOffset + context.Cap = gc.Current.Cap + context.Join = gc.Current.Join + context.Path = gc.Current.Path.Copy() + context.font = gc.Current.font + context.scale = gc.Current.scale + copy(context.Tr[:], gc.Current.Tr[:]) + context.previous = gc.Current + gc.Current = context +} + +func (gc *StackGraphicContext) Restore() { + if gc.Current.previous != nil { + oldContext := gc.Current + gc.Current = gc.Current.previous + oldContext.previous = nil + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stroker.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stroker.go new file mode 100644 index 000000000..9331187f6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/stroker.go @@ -0,0 +1,135 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 13/12/2010 by Laurent Le Goff + +package draw2d + +type Cap int + +const ( + RoundCap Cap = iota + ButtCap + SquareCap +) + +type Join int + +const ( + BevelJoin Join = iota + RoundJoin + MiterJoin +) + +type LineStroker struct { + Next VertexConverter + HalfLineWidth float64 + Cap Cap + Join Join + vertices []float64 + rewind []float64 + x, y, nx, ny float64 + command VertexCommand +} + +func NewLineStroker(c Cap, j Join, converter VertexConverter) *LineStroker { + l := new(LineStroker) + l.Next = converter + l.HalfLineWidth = 0.5 + l.vertices = make([]float64, 0, 256) + l.rewind = make([]float64, 0, 256) + l.Cap = c + l.Join = j + l.command = VertexNoCommand + return l +} + +func (l *LineStroker) NextCommand(command VertexCommand) { + l.command = command + if command == VertexStopCommand { + l.Next.NextCommand(VertexStartCommand) + for i, j := 0, 1; j < len(l.vertices); i, j = i+2, j+2 { + l.Next.Vertex(l.vertices[i], l.vertices[j]) + l.Next.NextCommand(VertexNoCommand) + } + for i, j := len(l.rewind)-2, len(l.rewind)-1; j > 0; i, j = i-2, j-2 { + l.Next.NextCommand(VertexNoCommand) + l.Next.Vertex(l.rewind[i], l.rewind[j]) + } + if len(l.vertices) > 1 { + l.Next.NextCommand(VertexNoCommand) + l.Next.Vertex(l.vertices[0], l.vertices[1]) + } + l.Next.NextCommand(VertexStopCommand) + // reinit vertices + l.vertices = l.vertices[0:0] + l.rewind = l.rewind[0:0] + l.x, l.y, l.nx, l.ny = 0, 0, 0, 0 + } +} + +func (l *LineStroker) Vertex(x, y float64) { + switch l.command { + case VertexNoCommand: + l.line(l.x, l.y, x, y) + case VertexJoinCommand: + l.joinLine(l.x, l.y, l.nx, l.ny, x, y) + case VertexStartCommand: + l.x, l.y = x, y + case VertexCloseCommand: + l.line(l.x, l.y, x, y) + l.joinLine(l.x, l.y, l.nx, l.ny, x, y) + l.closePolygon() + } + l.command = VertexNoCommand +} + +func (l *LineStroker) appendVertex(vertices ...float64) { + s := len(vertices) / 2 + if len(l.vertices)+s >= cap(l.vertices) { + v := make([]float64, len(l.vertices), 
cap(l.vertices)+128) + copy(v, l.vertices) + l.vertices = v + v = make([]float64, len(l.rewind), cap(l.rewind)+128) + copy(v, l.rewind) + l.rewind = v + } + + copy(l.vertices[len(l.vertices):len(l.vertices)+s], vertices[:s]) + l.vertices = l.vertices[0 : len(l.vertices)+s] + copy(l.rewind[len(l.rewind):len(l.rewind)+s], vertices[s:]) + l.rewind = l.rewind[0 : len(l.rewind)+s] + +} + +func (l *LineStroker) closePolygon() { + if len(l.vertices) > 1 { + l.appendVertex(l.vertices[0], l.vertices[1], l.rewind[0], l.rewind[1]) + } +} + +func (l *LineStroker) line(x1, y1, x2, y2 float64) { + dx := (x2 - x1) + dy := (y2 - y1) + d := vectorDistance(dx, dy) + if d != 0 { + nx := dy * l.HalfLineWidth / d + ny := -(dx * l.HalfLineWidth / d) + l.appendVertex(x1+nx, y1+ny, x2+nx, y2+ny, x1-nx, y1-ny, x2-nx, y2-ny) + l.x, l.y, l.nx, l.ny = x2, y2, nx, ny + } +} + +func (l *LineStroker) joinLine(x1, y1, nx1, ny1, x2, y2 float64) { + dx := (x2 - x1) + dy := (y2 - y1) + d := vectorDistance(dx, dy) + + if d != 0 { + nx := dy * l.HalfLineWidth / d + ny := -(dx * l.HalfLineWidth / d) + /* l.join(x1, y1, x1 + nx, y1 - ny, nx, ny, x1 + ny2, y1 + nx2, nx2, ny2) + l.join(x1, y1, x1 - ny1, y1 - nx1, nx1, ny1, x1 - ny2, y1 - nx2, nx2, ny2)*/ + + l.appendVertex(x1+nx, y1+ny, x2+nx, y2+ny, x1-nx, y1-ny, x2-nx, y2-ny) + l.x, l.y, l.nx, l.ny = x2, y2, nx, ny + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/transform.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/transform.go new file mode 100644 index 000000000..1d89bfa9b --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/transform.go @@ -0,0 +1,306 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "code.google.com/p/freetype-go/freetype/raster" + "math" +) + +type MatrixTransform [6]float64 + +const ( + epsilon = 1e-6 +) + +func (tr MatrixTransform) Determinant() float64 { + return tr[0]*tr[3] - tr[1]*tr[2] +} + +func (tr MatrixTransform) Transform(points ...*float64) { + for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { + x := *points[i] + y := *points[j] + *points[i] = x*tr[0] + y*tr[2] + tr[4] + *points[j] = x*tr[1] + y*tr[3] + tr[5] + } +} + +func (tr MatrixTransform) TransformArray(points []float64) { + for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { + x := points[i] + y := points[j] + points[i] = x*tr[0] + y*tr[2] + tr[4] + points[j] = x*tr[1] + y*tr[3] + tr[5] + } +} + +func (tr MatrixTransform) TransformRectangle(x0, y0, x2, y2 *float64) { + x1 := *x2 + y1 := *y0 + x3 := *x0 + y3 := *y2 + tr.Transform(x0, y0, &x1, &y1, x2, y2, &x3, &y3) + *x0, x1 = minMax(*x0, x1) + *x2, x3 = minMax(*x2, x3) + *y0, y1 = minMax(*y0, y1) + *y2, y3 = minMax(*y2, y3) + + *x0 = min(*x0, *x2) + *y0 = min(*y0, *y2) + *x2 = max(x1, x3) + *y2 = max(y1, y3) +} + +func (tr MatrixTransform) TransformRasterPoint(points ...*raster.Point) { + for _, point := range points { + x := float64(point.X) / 256 + y := float64(point.Y) / 256 + point.X = raster.Fix32((x*tr[0] + y*tr[2] + tr[4]) * 256) + point.Y = raster.Fix32((x*tr[1] + y*tr[3] + tr[5]) * 256) + } +} + +func (tr MatrixTransform) InverseTransform(points ...*float64) { + d := tr.Determinant() // matrix determinant + for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { + x := *points[i] + y := *points[j] + *points[i] = ((x-tr[4])*tr[3] - (y-tr[5])*tr[2]) / d + *points[j] = ((y-tr[5])*tr[0] - (x-tr[4])*tr[1]) / d + } +} + +// ******************** Vector transformations ******************** + +func (tr MatrixTransform) VectorTransform(points ...*float64) { + for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { + x := *points[i] + y := *points[j] + 
*points[i] = x*tr[0] + y*tr[2] + *points[j] = x*tr[1] + y*tr[3] + } +} + +// ******************** Transformations creation ******************** + +/** Creates an identity transformation. */ +func NewIdentityMatrix() MatrixTransform { + return [6]float64{1, 0, 0, 1, 0, 0} +} + +/** + * Creates a transformation with a translation, that, + * transform point1 into point2. + */ +func NewTranslationMatrix(tx, ty float64) MatrixTransform { + return [6]float64{1, 0, 0, 1, tx, ty} +} + +/** + * Creates a transformation with a sx, sy scale factor + */ +func NewScaleMatrix(sx, sy float64) MatrixTransform { + return [6]float64{sx, 0, 0, sy, 0, 0} +} + +/** + * Creates a rotation transformation. + */ +func NewRotationMatrix(angle float64) MatrixTransform { + c := math.Cos(angle) + s := math.Sin(angle) + return [6]float64{c, s, -s, c, 0, 0} +} + +/** + * Creates a transformation, combining a scale and a translation, that transform rectangle1 into rectangle2. + */ +func NewMatrixTransform(rectangle1, rectangle2 [4]float64) MatrixTransform { + xScale := (rectangle2[2] - rectangle2[0]) / (rectangle1[2] - rectangle1[0]) + yScale := (rectangle2[3] - rectangle2[1]) / (rectangle1[3] - rectangle1[1]) + xOffset := rectangle2[0] - (rectangle1[0] * xScale) + yOffset := rectangle2[1] - (rectangle1[1] * yScale) + return [6]float64{xScale, 0, 0, yScale, xOffset, yOffset} +} + +// ******************** Transformations operations ******************** + +/** + * Returns a transformation that is the inverse of the given transformation. 
+ */ +func (tr MatrixTransform) GetInverseTransformation() MatrixTransform { + d := tr.Determinant() // matrix determinant + return [6]float64{ + tr[3] / d, + -tr[1] / d, + -tr[2] / d, + tr[0] / d, + (tr[2]*tr[5] - tr[3]*tr[4]) / d, + (tr[1]*tr[4] - tr[0]*tr[5]) / d} +} + +func (tr1 MatrixTransform) Multiply(tr2 MatrixTransform) MatrixTransform { + return [6]float64{ + tr1[0]*tr2[0] + tr1[1]*tr2[2], + tr1[1]*tr2[3] + tr1[0]*tr2[1], + tr1[2]*tr2[0] + tr1[3]*tr2[2], + tr1[3]*tr2[3] + tr1[2]*tr2[1], + tr1[4]*tr2[0] + tr1[5]*tr2[2] + tr2[4], + tr1[5]*tr2[3] + tr1[4]*tr2[1] + tr2[5]} +} + +func (tr *MatrixTransform) Scale(sx, sy float64) *MatrixTransform { + tr[0] = sx * tr[0] + tr[1] = sx * tr[1] + tr[2] = sy * tr[2] + tr[3] = sy * tr[3] + return tr +} + +func (tr *MatrixTransform) Translate(tx, ty float64) *MatrixTransform { + tr[4] = tx*tr[0] + ty*tr[2] + tr[4] + tr[5] = ty*tr[3] + tx*tr[1] + tr[5] + return tr +} + +func (tr *MatrixTransform) Rotate(angle float64) *MatrixTransform { + c := math.Cos(angle) + s := math.Sin(angle) + t0 := c*tr[0] + s*tr[2] + t1 := s*tr[3] + c*tr[1] + t2 := c*tr[2] - s*tr[0] + t3 := c*tr[3] - s*tr[1] + tr[0] = t0 + tr[1] = t1 + tr[2] = t2 + tr[3] = t3 + return tr +} + +func (tr MatrixTransform) GetTranslation() (x, y float64) { + return tr[4], tr[5] +} + +func (tr MatrixTransform) GetScaling() (x, y float64) { + return tr[0], tr[3] +} + +func (tr MatrixTransform) GetScale() float64 { + x := 0.707106781*tr[0] + 0.707106781*tr[1] + y := 0.707106781*tr[2] + 0.707106781*tr[3] + return math.Sqrt(x*x + y*y) +} + +func (tr MatrixTransform) GetMaxAbsScaling() (s float64) { + sx := math.Abs(tr[0]) + sy := math.Abs(tr[3]) + if sx > sy { + return sx + } + return sy +} + +func (tr MatrixTransform) GetMinAbsScaling() (s float64) { + sx := math.Abs(tr[0]) + sy := math.Abs(tr[3]) + if sx > sy { + return sy + } + return sx +} + +// ******************** Testing ******************** + +/** + * Tests if a two transformation are equal. 
A tolerance is applied when + * comparing matrix elements. + */ +func (tr1 MatrixTransform) Equals(tr2 MatrixTransform) bool { + for i := 0; i < 6; i = i + 1 { + if !fequals(tr1[i], tr2[i]) { + return false + } + } + return true +} + +/** + * Tests if a transformation is the identity transformation. A tolerance + * is applied when comparing matrix elements. + */ +func (tr MatrixTransform) IsIdentity() bool { + return fequals(tr[4], 0) && fequals(tr[5], 0) && tr.IsTranslation() +} + +/** + * Tests if a transformation is is a pure translation. A tolerance + * is applied when comparing matrix elements. + */ +func (tr MatrixTransform) IsTranslation() bool { + return fequals(tr[0], 1) && fequals(tr[1], 0) && fequals(tr[2], 0) && fequals(tr[3], 1) +} + +/** + * Compares two floats. + * return true if the distance between the two floats is less than epsilon, false otherwise + */ +func fequals(float1, float2 float64) bool { + return math.Abs(float1-float2) <= epsilon +} + +// this VertexConverter apply the Matrix transformation tr +type VertexMatrixTransform struct { + tr MatrixTransform + Next VertexConverter +} + +func NewVertexMatrixTransform(tr MatrixTransform, converter VertexConverter) *VertexMatrixTransform { + return &VertexMatrixTransform{tr, converter} +} + +// Vertex Matrix Transform +func (vmt *VertexMatrixTransform) NextCommand(command VertexCommand) { + vmt.Next.NextCommand(command) +} + +func (vmt *VertexMatrixTransform) Vertex(x, y float64) { + u := x*vmt.tr[0] + y*vmt.tr[2] + vmt.tr[4] + v := x*vmt.tr[1] + y*vmt.tr[3] + vmt.tr[5] + vmt.Next.Vertex(u, v) +} + +// this adder apply a Matrix transformation to points +type MatrixTransformAdder struct { + tr MatrixTransform + next raster.Adder +} + +func NewMatrixTransformAdder(tr MatrixTransform, adder raster.Adder) *MatrixTransformAdder { + return &MatrixTransformAdder{tr, adder} +} + +// Start starts a new curve at the given point. 
+func (mta MatrixTransformAdder) Start(a raster.Point) { + mta.tr.TransformRasterPoint(&a) + mta.next.Start(a) +} + +// Add1 adds a linear segment to the current curve. +func (mta MatrixTransformAdder) Add1(b raster.Point) { + mta.tr.TransformRasterPoint(&b) + mta.next.Add1(b) +} + +// Add2 adds a quadratic segment to the current curve. +func (mta MatrixTransformAdder) Add2(b, c raster.Point) { + mta.tr.TransformRasterPoint(&b, &c) + mta.next.Add2(b, c) +} + +// Add3 adds a cubic segment to the current curve. +func (mta MatrixTransformAdder) Add3(b, c, d raster.Point) { + mta.tr.TransformRasterPoint(&b, &c, &d) + mta.next.Add3(b, c, d) +} diff --git a/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/vertex2d.go b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/vertex2d.go new file mode 100644 index 000000000..4e4d4fd83 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/draw2d/draw2d/vertex2d.go @@ -0,0 +1,19 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +type VertexCommand byte + +const ( + VertexNoCommand VertexCommand = iota + VertexStartCommand + VertexJoinCommand + VertexCloseCommand + VertexStopCommand +) + +type VertexConverter interface { + NextCommand(cmd VertexCommand) + Vertex(x, y float64) +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/geom.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/geom.go new file mode 100644 index 000000000..63c86e6ab --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/geom.go @@ -0,0 +1,280 @@ +// Copyright 2010 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. 
+ +package raster + +import ( + "fmt" + "math" +) + +// A Fix32 is a 24.8 fixed point number. +type Fix32 int32 + +// A Fix64 is a 48.16 fixed point number. +type Fix64 int64 + +// String returns a human-readable representation of a 24.8 fixed point number. +// For example, the number one-and-a-quarter becomes "1:064". +func (x Fix32) String() string { + if x < 0 { + x = -x + return fmt.Sprintf("-%d:%03d", int32(x/256), int32(x%256)) + } + return fmt.Sprintf("%d:%03d", int32(x/256), int32(x%256)) +} + +// String returns a human-readable representation of a 48.16 fixed point number. +// For example, the number one-and-a-quarter becomes "1:16384". +func (x Fix64) String() string { + if x < 0 { + x = -x + return fmt.Sprintf("-%d:%05d", int64(x/65536), int64(x%65536)) + } + return fmt.Sprintf("%d:%05d", int64(x/65536), int64(x%65536)) +} + +// maxAbs returns the maximum of abs(a) and abs(b). +func maxAbs(a, b Fix32) Fix32 { + if a < 0 { + a = -a + } + if b < 0 { + b = -b + } + if a < b { + return b + } + return a +} + +// A Point represents a two-dimensional point or vector, in 24.8 fixed point +// format. +type Point struct { + X, Y Fix32 +} + +// String returns a human-readable representation of a Point. +func (p Point) String() string { + return "(" + p.X.String() + ", " + p.Y.String() + ")" +} + +// Add returns the vector p + q. +func (p Point) Add(q Point) Point { + return Point{p.X + q.X, p.Y + q.Y} +} + +// Sub returns the vector p - q. +func (p Point) Sub(q Point) Point { + return Point{p.X - q.X, p.Y - q.Y} +} + +// Mul returns the vector k * p. +func (p Point) Mul(k Fix32) Point { + return Point{p.X * k / 256, p.Y * k / 256} +} + +// Neg returns the vector -p, or equivalently p rotated by 180 degrees. +func (p Point) Neg() Point { + return Point{-p.X, -p.Y} +} + +// Dot returns the dot product p·q. 
+func (p Point) Dot(q Point) Fix64 { + px, py := int64(p.X), int64(p.Y) + qx, qy := int64(q.X), int64(q.Y) + return Fix64(px*qx + py*qy) +} + +// Len returns the length of the vector p. +func (p Point) Len() Fix32 { + // TODO(nigeltao): use fixed point math. + x := float64(p.X) + y := float64(p.Y) + return Fix32(math.Sqrt(x*x + y*y)) +} + +// Norm returns the vector p normalized to the given length, or the zero Point +// if p is degenerate. +func (p Point) Norm(length Fix32) Point { + d := p.Len() + if d == 0 { + return Point{} + } + s, t := int64(length), int64(d) + x := int64(p.X) * s / t + y := int64(p.Y) * s / t + return Point{Fix32(x), Fix32(y)} +} + +// Rot45CW returns the vector p rotated clockwise by 45 degrees. +// Note that the Y-axis grows downwards, so {1, 0}.Rot45CW is {1/√2, 1/√2}. +func (p Point) Rot45CW() Point { + // 181/256 is approximately 1/√2, or sin(π/4). + px, py := int64(p.X), int64(p.Y) + qx := (+px - py) * 181 / 256 + qy := (+px + py) * 181 / 256 + return Point{Fix32(qx), Fix32(qy)} +} + +// Rot90CW returns the vector p rotated clockwise by 90 degrees. +// Note that the Y-axis grows downwards, so {1, 0}.Rot90CW is {0, 1}. +func (p Point) Rot90CW() Point { + return Point{-p.Y, p.X} +} + +// Rot135CW returns the vector p rotated clockwise by 135 degrees. +// Note that the Y-axis grows downwards, so {1, 0}.Rot135CW is {-1/√2, 1/√2}. +func (p Point) Rot135CW() Point { + // 181/256 is approximately 1/√2, or sin(π/4). + px, py := int64(p.X), int64(p.Y) + qx := (-px - py) * 181 / 256 + qy := (+px - py) * 181 / 256 + return Point{Fix32(qx), Fix32(qy)} +} + +// Rot45CCW returns the vector p rotated counter-clockwise by 45 degrees. +// Note that the Y-axis grows downwards, so {1, 0}.Rot45CCW is {1/√2, -1/√2}. +func (p Point) Rot45CCW() Point { + // 181/256 is approximately 1/√2, or sin(π/4). 
+ px, py := int64(p.X), int64(p.Y) + qx := (+px + py) * 181 / 256 + qy := (-px + py) * 181 / 256 + return Point{Fix32(qx), Fix32(qy)} +} + +// Rot90CCW returns the vector p rotated counter-clockwise by 90 degrees. +// Note that the Y-axis grows downwards, so {1, 0}.Rot90CCW is {0, -1}. +func (p Point) Rot90CCW() Point { + return Point{p.Y, -p.X} +} + +// Rot135CCW returns the vector p rotated counter-clockwise by 135 degrees. +// Note that the Y-axis grows downwards, so {1, 0}.Rot135CCW is {-1/√2, -1/√2}. +func (p Point) Rot135CCW() Point { + // 181/256 is approximately 1/√2, or sin(π/4). + px, py := int64(p.X), int64(p.Y) + qx := (-px + py) * 181 / 256 + qy := (-px - py) * 181 / 256 + return Point{Fix32(qx), Fix32(qy)} +} + +// An Adder accumulates points on a curve. +type Adder interface { + // Start starts a new curve at the given point. + Start(a Point) + // Add1 adds a linear segment to the current curve. + Add1(b Point) + // Add2 adds a quadratic segment to the current curve. + Add2(b, c Point) + // Add3 adds a cubic segment to the current curve. + Add3(b, c, d Point) +} + +// A Path is a sequence of curves, and a curve is a start point followed by a +// sequence of linear, quadratic or cubic segments. +type Path []Fix32 + +// String returns a human-readable representation of a Path. +func (p Path) String() string { + s := "" + for i := 0; i < len(p); { + if i != 0 { + s += " " + } + switch p[i] { + case 0: + s += "S0" + fmt.Sprint([]Fix32(p[i+1:i+3])) + i += 4 + case 1: + s += "A1" + fmt.Sprint([]Fix32(p[i+1:i+3])) + i += 4 + case 2: + s += "A2" + fmt.Sprint([]Fix32(p[i+1:i+5])) + i += 6 + case 3: + s += "A3" + fmt.Sprint([]Fix32(p[i+1:i+7])) + i += 8 + default: + panic("freetype/raster: bad path") + } + } + return s +} + +// Clear cancels any previous calls to p.Start or p.AddXxx. +func (p *Path) Clear() { + *p = (*p)[:0] +} + +// Start starts a new curve at the given point. 
+func (p *Path) Start(a Point) { + *p = append(*p, 0, a.X, a.Y, 0) +} + +// Add1 adds a linear segment to the current curve. +func (p *Path) Add1(b Point) { + *p = append(*p, 1, b.X, b.Y, 1) +} + +// Add2 adds a quadratic segment to the current curve. +func (p *Path) Add2(b, c Point) { + *p = append(*p, 2, b.X, b.Y, c.X, c.Y, 2) +} + +// Add3 adds a cubic segment to the current curve. +func (p *Path) Add3(b, c, d Point) { + *p = append(*p, 3, b.X, b.Y, c.X, c.Y, d.X, d.Y, 3) +} + +// AddPath adds the Path q to p. +func (p *Path) AddPath(q Path) { + *p = append(*p, q...) +} + +// AddStroke adds a stroked Path. +func (p *Path) AddStroke(q Path, width Fix32, cr Capper, jr Joiner) { + Stroke(p, q, width, cr, jr) +} + +// firstPoint returns the first point in a non-empty Path. +func (p Path) firstPoint() Point { + return Point{p[1], p[2]} +} + +// lastPoint returns the last point in a non-empty Path. +func (p Path) lastPoint() Point { + return Point{p[len(p)-3], p[len(p)-2]} +} + +// addPathReversed adds q reversed to p. +// For example, if q consists of a linear segment from A to B followed by a +// quadratic segment from B to C to D, then the values of q looks like: +// index: 01234567890123 +// value: 0AA01BB12CCDD2 +// So, when adding q backwards to p, we want to Add2(C, B) followed by Add1(A). 
+func addPathReversed(p Adder, q Path) { + if len(q) == 0 { + return + } + i := len(q) - 1 + for { + switch q[i] { + case 0: + return + case 1: + i -= 4 + p.Add1(Point{q[i-2], q[i-1]}) + case 2: + i -= 6 + p.Add2(Point{q[i+2], q[i+3]}, Point{q[i-2], q[i-1]}) + case 3: + i -= 8 + p.Add3(Point{q[i+4], q[i+5]}, Point{q[i+2], q[i+3]}, Point{q[i-2], q[i-1]}) + default: + panic("freetype/raster: bad path") + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/paint.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/paint.go new file mode 100644 index 000000000..13cccc192 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/paint.go @@ -0,0 +1,292 @@ +// Copyright 2010 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package raster + +import ( + "image" + "image/color" + "image/draw" + "math" +) + +// A Span is a horizontal segment of pixels with constant alpha. X0 is an +// inclusive bound and X1 is exclusive, the same as for slices. A fully +// opaque Span has A == 1<<32 - 1. +type Span struct { + Y, X0, X1 int + A uint32 +} + +// A Painter knows how to paint a batch of Spans. Rasterization may involve +// Painting multiple batches, and done will be true for the final batch. +// The Spans' Y values are monotonically increasing during a rasterization. +// Paint may use all of ss as scratch space during the call. +type Painter interface { + Paint(ss []Span, done bool) +} + +// The PainterFunc type adapts an ordinary function to the Painter interface. +type PainterFunc func(ss []Span, done bool) + +// Paint just delegates the call to f. 
+func (f PainterFunc) Paint(ss []Span, done bool) { f(ss, done) } + +// An AlphaOverPainter is a Painter that paints Spans onto an image.Alpha +// using the Over Porter-Duff composition operator. +type AlphaOverPainter struct { + Image *image.Alpha +} + +// Paint satisfies the Painter interface by painting ss onto an image.Alpha. +func (r AlphaOverPainter) Paint(ss []Span, done bool) { + b := r.Image.Bounds() + for _, s := range ss { + if s.Y < b.Min.Y { + continue + } + if s.Y >= b.Max.Y { + return + } + if s.X0 < b.Min.X { + s.X0 = b.Min.X + } + if s.X1 > b.Max.X { + s.X1 = b.Max.X + } + if s.X0 >= s.X1 { + continue + } + base := (s.Y-r.Image.Rect.Min.Y)*r.Image.Stride - r.Image.Rect.Min.X + p := r.Image.Pix[base+s.X0 : base+s.X1] + a := int(s.A >> 24) + for i, c := range p { + v := int(c) + p[i] = uint8((v*255 + (255-v)*a) / 255) + } + } +} + +// NewAlphaOverPainter creates a new AlphaOverPainter for the given image. +func NewAlphaOverPainter(m *image.Alpha) AlphaOverPainter { + return AlphaOverPainter{m} +} + +// An AlphaSrcPainter is a Painter that paints Spans onto an image.Alpha +// using the Src Porter-Duff composition operator. +type AlphaSrcPainter struct { + Image *image.Alpha +} + +// Paint satisfies the Painter interface by painting ss onto an image.Alpha. +func (r AlphaSrcPainter) Paint(ss []Span, done bool) { + b := r.Image.Bounds() + for _, s := range ss { + if s.Y < b.Min.Y { + continue + } + if s.Y >= b.Max.Y { + return + } + if s.X0 < b.Min.X { + s.X0 = b.Min.X + } + if s.X1 > b.Max.X { + s.X1 = b.Max.X + } + if s.X0 >= s.X1 { + continue + } + base := (s.Y-r.Image.Rect.Min.Y)*r.Image.Stride - r.Image.Rect.Min.X + p := r.Image.Pix[base+s.X0 : base+s.X1] + color := uint8(s.A >> 24) + for i := range p { + p[i] = color + } + } +} + +// NewAlphaSrcPainter creates a new AlphaSrcPainter for the given image. 
+func NewAlphaSrcPainter(m *image.Alpha) AlphaSrcPainter { + return AlphaSrcPainter{m} +} + +type RGBAPainter struct { + // The image to compose onto. + Image *image.RGBA + // The Porter-Duff composition operator. + Op draw.Op + // The 16-bit color to paint the spans. + cr, cg, cb, ca uint32 +} + +// Paint satisfies the Painter interface by painting ss onto an image.RGBA. +func (r *RGBAPainter) Paint(ss []Span, done bool) { + b := r.Image.Bounds() + for _, s := range ss { + if s.Y < b.Min.Y { + continue + } + if s.Y >= b.Max.Y { + return + } + if s.X0 < b.Min.X { + s.X0 = b.Min.X + } + if s.X1 > b.Max.X { + s.X1 = b.Max.X + } + if s.X0 >= s.X1 { + continue + } + // This code is similar to drawGlyphOver in $GOROOT/src/pkg/image/draw/draw.go. + ma := s.A >> 16 + const m = 1<<16 - 1 + i0 := (s.Y-r.Image.Rect.Min.Y)*r.Image.Stride + (s.X0-r.Image.Rect.Min.X)*4 + i1 := i0 + (s.X1-s.X0)*4 + if r.Op == draw.Over { + for i := i0; i < i1; i += 4 { + dr := uint32(r.Image.Pix[i+0]) + dg := uint32(r.Image.Pix[i+1]) + db := uint32(r.Image.Pix[i+2]) + da := uint32(r.Image.Pix[i+3]) + a := (m - (r.ca * ma / m)) * 0x101 + r.Image.Pix[i+0] = uint8((dr*a + r.cr*ma) / m >> 8) + r.Image.Pix[i+1] = uint8((dg*a + r.cg*ma) / m >> 8) + r.Image.Pix[i+2] = uint8((db*a + r.cb*ma) / m >> 8) + r.Image.Pix[i+3] = uint8((da*a + r.ca*ma) / m >> 8) + } + } else { + for i := i0; i < i1; i += 4 { + r.Image.Pix[i+0] = uint8(r.cr * ma / m >> 8) + r.Image.Pix[i+1] = uint8(r.cg * ma / m >> 8) + r.Image.Pix[i+2] = uint8(r.cb * ma / m >> 8) + r.Image.Pix[i+3] = uint8(r.ca * ma / m >> 8) + } + } + } +} + +// SetColor sets the color to paint the spans. +func (r *RGBAPainter) SetColor(c color.Color) { + r.cr, r.cg, r.cb, r.ca = c.RGBA() +} + +// NewRGBAPainter creates a new RGBAPainter for the given image. 
+func NewRGBAPainter(m *image.RGBA) *RGBAPainter { + return &RGBAPainter{Image: m} +} + +// A MonochromePainter wraps another Painter, quantizing each Span's alpha to +// be either fully opaque or fully transparent. +type MonochromePainter struct { + Painter Painter + y, x0, x1 int +} + +// Paint delegates to the wrapped Painter after quantizing each Span's alpha +// value and merging adjacent fully opaque Spans. +func (m *MonochromePainter) Paint(ss []Span, done bool) { + // We compact the ss slice, discarding any Spans whose alpha quantizes to zero. + j := 0 + for _, s := range ss { + if s.A >= 1<<31 { + if m.y == s.Y && m.x1 == s.X0 { + m.x1 = s.X1 + } else { + ss[j] = Span{m.y, m.x0, m.x1, 1<<32 - 1} + j++ + m.y, m.x0, m.x1 = s.Y, s.X0, s.X1 + } + } + } + if done { + // Flush the accumulated Span. + finalSpan := Span{m.y, m.x0, m.x1, 1<<32 - 1} + if j < len(ss) { + ss[j] = finalSpan + j++ + m.Painter.Paint(ss[:j], true) + } else if j == len(ss) { + m.Painter.Paint(ss, false) + if cap(ss) > 0 { + ss = ss[:1] + } else { + ss = make([]Span, 1) + } + ss[0] = finalSpan + m.Painter.Paint(ss, true) + } else { + panic("unreachable") + } + // Reset the accumulator, so that this Painter can be re-used. + m.y, m.x0, m.x1 = 0, 0, 0 + } else { + m.Painter.Paint(ss[:j], false) + } +} + +// NewMonochromePainter creates a new MonochromePainter that wraps the given +// Painter. +func NewMonochromePainter(p Painter) *MonochromePainter { + return &MonochromePainter{Painter: p} +} + +// A GammaCorrectionPainter wraps another Painter, performing gamma-correction +// on each Span's alpha value. +type GammaCorrectionPainter struct { + // The wrapped Painter. + Painter Painter + // Precomputed alpha values for linear interpolation, with fully opaque == 1<<16-1. + a [256]uint16 + // Whether gamma correction is a no-op. + gammaIsOne bool +} + +// Paint delegates to the wrapped Painter after performing gamma-correction +// on each Span. 
+func (g *GammaCorrectionPainter) Paint(ss []Span, done bool) { + if !g.gammaIsOne { + const ( + M = 0x1010101 // 255*M == 1<<32-1 + N = 0x8080 // N = M>>9, and N < 1<<16-1 + ) + for i, s := range ss { + if s.A == 0 || s.A == 1<<32-1 { + continue + } + p, q := s.A/M, (s.A%M)>>9 + // The resultant alpha is a linear interpolation of g.a[p] and g.a[p+1]. + a := uint32(g.a[p])*(N-q) + uint32(g.a[p+1])*q + a = (a + N/2) / N + // Convert the alpha from 16-bit (which is g.a's range) to 32-bit. + a |= a << 16 + ss[i].A = a + } + } + g.Painter.Paint(ss, done) +} + +// SetGamma sets the gamma value. +func (g *GammaCorrectionPainter) SetGamma(gamma float64) { + if gamma == 1.0 { + g.gammaIsOne = true + return + } + g.gammaIsOne = false + for i := 0; i < 256; i++ { + a := float64(i) / 0xff + a = math.Pow(a, gamma) + g.a[i] = uint16(0xffff * a) + } +} + +// NewGammaCorrectionPainter creates a new GammaCorrectionPainter that wraps +// the given Painter. +func NewGammaCorrectionPainter(p Painter, gamma float64) *GammaCorrectionPainter { + g := &GammaCorrectionPainter{Painter: p} + g.SetGamma(gamma) + return g +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/raster.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/raster.go new file mode 100644 index 000000000..45af7eaa2 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/raster.go @@ -0,0 +1,579 @@ +// Copyright 2010 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +// The raster package provides an anti-aliasing 2-D rasterizer. 
+// +// It is part of the larger Freetype-Go suite of font-related packages, +// but the raster package is not specific to font rasterization, and can +// be used standalone without any other Freetype-Go package. +// +// Rasterization is done by the same area/coverage accumulation algorithm +// as the Freetype "smooth" module, and the Anti-Grain Geometry library. +// A description of the area/coverage algorithm is at +// http://projects.tuxee.net/cl-vectors/section-the-cl-aa-algorithm +package raster + +import ( + "strconv" +) + +// A cell is part of a linked list (for a given yi co-ordinate) of accumulated +// area/coverage for the pixel at (xi, yi). +type cell struct { + xi int + area, cover int + next int +} + +type Rasterizer struct { + // If false, the default behavior is to use the even-odd winding fill + // rule during Rasterize. + UseNonZeroWinding bool + // An offset (in pixels) to the painted spans. + Dx, Dy int + + // The width of the Rasterizer. The height is implicit in len(cellIndex). + width int + // splitScaleN is the scaling factor used to determine how many times + // to decompose a quadratic or cubic segment into a linear approximation. + splitScale2, splitScale3 int + + // The current pen position. + a Point + // The current cell and its area/coverage being accumulated. + xi, yi int + area, cover int + + // Saved cells. + cell []cell + // Linked list of cells, one per row. + cellIndex []int + // Buffers. + cellBuf [256]cell + cellIndexBuf [64]int + spanBuf [64]Span +} + +// findCell returns the index in r.cell for the cell corresponding to +// (r.xi, r.yi). The cell is created if necessary. 
+func (r *Rasterizer) findCell() int { + if r.yi < 0 || r.yi >= len(r.cellIndex) { + return -1 + } + xi := r.xi + if xi < 0 { + xi = -1 + } else if xi > r.width { + xi = r.width + } + i, prev := r.cellIndex[r.yi], -1 + for i != -1 && r.cell[i].xi <= xi { + if r.cell[i].xi == xi { + return i + } + i, prev = r.cell[i].next, i + } + c := len(r.cell) + if c == cap(r.cell) { + buf := make([]cell, c, 4*c) + copy(buf, r.cell) + r.cell = buf[0 : c+1] + } else { + r.cell = r.cell[0 : c+1] + } + r.cell[c] = cell{xi, 0, 0, i} + if prev == -1 { + r.cellIndex[r.yi] = c + } else { + r.cell[prev].next = c + } + return c +} + +// saveCell saves any accumulated r.area/r.cover for (r.xi, r.yi). +func (r *Rasterizer) saveCell() { + if r.area != 0 || r.cover != 0 { + i := r.findCell() + if i != -1 { + r.cell[i].area += r.area + r.cell[i].cover += r.cover + } + r.area = 0 + r.cover = 0 + } +} + +// setCell sets the (xi, yi) cell that r is accumulating area/coverage for. +func (r *Rasterizer) setCell(xi, yi int) { + if r.xi != xi || r.yi != yi { + r.saveCell() + r.xi, r.yi = xi, yi + } +} + +// scan accumulates area/coverage for the yi'th scanline, going from +// x0 to x1 in the horizontal direction (in 24.8 fixed point co-ordinates) +// and from y0f to y1f fractional vertical units within that scanline. +func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f Fix32) { + // Break the 24.8 fixed point X co-ordinates into integral and fractional parts. + x0i := int(x0) / 256 + x0f := x0 - Fix32(256*x0i) + x1i := int(x1) / 256 + x1f := x1 - Fix32(256*x1i) + + // A perfectly horizontal scan. + if y0f == y1f { + r.setCell(x1i, yi) + return + } + dx, dy := x1-x0, y1f-y0f + // A single cell scan. + if x0i == x1i { + r.area += int((x0f + x1f) * dy) + r.cover += int(dy) + return + } + // There are at least two cells. Apart from the first and last cells, + // all intermediate cells go through the full width of the cell, + // or 256 units in 24.8 fixed point format. 
+ var ( + p, q, edge0, edge1 Fix32 + xiDelta int + ) + if dx > 0 { + p, q = (256-x0f)*dy, dx + edge0, edge1, xiDelta = 0, 256, 1 + } else { + p, q = x0f*dy, -dx + edge0, edge1, xiDelta = 256, 0, -1 + } + yDelta, yRem := p/q, p%q + if yRem < 0 { + yDelta -= 1 + yRem += q + } + // Do the first cell. + xi, y := x0i, y0f + r.area += int((x0f + edge1) * yDelta) + r.cover += int(yDelta) + xi, y = xi+xiDelta, y+yDelta + r.setCell(xi, yi) + if xi != x1i { + // Do all the intermediate cells. + p = 256 * (y1f - y + yDelta) + fullDelta, fullRem := p/q, p%q + if fullRem < 0 { + fullDelta -= 1 + fullRem += q + } + yRem -= q + for xi != x1i { + yDelta = fullDelta + yRem += fullRem + if yRem >= 0 { + yDelta += 1 + yRem -= q + } + r.area += int(256 * yDelta) + r.cover += int(yDelta) + xi, y = xi+xiDelta, y+yDelta + r.setCell(xi, yi) + } + } + // Do the last cell. + yDelta = y1f - y + r.area += int((edge0 + x1f) * yDelta) + r.cover += int(yDelta) +} + +// Start starts a new curve at the given point. +func (r *Rasterizer) Start(a Point) { + r.setCell(int(a.X/256), int(a.Y/256)) + r.a = a +} + +// Add1 adds a linear segment to the current curve. +func (r *Rasterizer) Add1(b Point) { + x0, y0 := r.a.X, r.a.Y + x1, y1 := b.X, b.Y + dx, dy := x1-x0, y1-y0 + // Break the 24.8 fixed point Y co-ordinates into integral and fractional parts. + y0i := int(y0) / 256 + y0f := y0 - Fix32(256*y0i) + y1i := int(y1) / 256 + y1f := y1 - Fix32(256*y1i) + + if y0i == y1i { + // There is only one scanline. + r.scan(y0i, x0, y0f, x1, y1f) + + } else if dx == 0 { + // This is a vertical line segment. We avoid calling r.scan and instead + // manipulate r.area and r.cover directly. + var ( + edge0, edge1 Fix32 + yiDelta int + ) + if dy > 0 { + edge0, edge1, yiDelta = 0, 256, 1 + } else { + edge0, edge1, yiDelta = 256, 0, -1 + } + x0i, yi := int(x0)/256, y0i + x0fTimes2 := (int(x0) - (256 * x0i)) * 2 + // Do the first pixel. 
+ dcover := int(edge1 - y0f) + darea := int(x0fTimes2 * dcover) + r.area += darea + r.cover += dcover + yi += yiDelta + r.setCell(x0i, yi) + // Do all the intermediate pixels. + dcover = int(edge1 - edge0) + darea = int(x0fTimes2 * dcover) + for yi != y1i { + r.area += darea + r.cover += dcover + yi += yiDelta + r.setCell(x0i, yi) + } + // Do the last pixel. + dcover = int(y1f - edge0) + darea = int(x0fTimes2 * dcover) + r.area += darea + r.cover += dcover + + } else { + // There are at least two scanlines. Apart from the first and last scanlines, + // all intermediate scanlines go through the full height of the row, or 256 + // units in 24.8 fixed point format. + var ( + p, q, edge0, edge1 Fix32 + yiDelta int + ) + if dy > 0 { + p, q = (256-y0f)*dx, dy + edge0, edge1, yiDelta = 0, 256, 1 + } else { + p, q = y0f*dx, -dy + edge0, edge1, yiDelta = 256, 0, -1 + } + xDelta, xRem := p/q, p%q + if xRem < 0 { + xDelta -= 1 + xRem += q + } + // Do the first scanline. + x, yi := x0, y0i + r.scan(yi, x, y0f, x+xDelta, edge1) + x, yi = x+xDelta, yi+yiDelta + r.setCell(int(x)/256, yi) + if yi != y1i { + // Do all the intermediate scanlines. + p = 256 * dx + fullDelta, fullRem := p/q, p%q + if fullRem < 0 { + fullDelta -= 1 + fullRem += q + } + xRem -= q + for yi != y1i { + xDelta = fullDelta + xRem += fullRem + if xRem >= 0 { + xDelta += 1 + xRem -= q + } + r.scan(yi, x, edge0, x+xDelta, edge1) + x, yi = x+xDelta, yi+yiDelta + r.setCell(int(x)/256, yi) + } + } + // Do the last scanline. + r.scan(yi, x, edge0, x1, y1f) + } + // The next lineTo starts from b. + r.a = b +} + +// Add2 adds a quadratic segment to the current curve. +func (r *Rasterizer) Add2(b, c Point) { + // Calculate nSplit (the number of recursive decompositions) based on how `curvy' it is. + // Specifically, how much the middle point b deviates from (a+c)/2. 
+ dev := maxAbs(r.a.X-2*b.X+c.X, r.a.Y-2*b.Y+c.Y) / Fix32(r.splitScale2) + nsplit := 0 + for dev > 0 { + dev /= 4 + nsplit++ + } + // dev is 32-bit, and nsplit++ every time we shift off 2 bits, so maxNsplit is 16. + const maxNsplit = 16 + if nsplit > maxNsplit { + panic("freetype/raster: Add2 nsplit too large: " + strconv.Itoa(nsplit)) + } + // Recursively decompose the curve nSplit levels deep. + var ( + pStack [2*maxNsplit + 3]Point + sStack [maxNsplit + 1]int + i int + ) + sStack[0] = nsplit + pStack[0] = c + pStack[1] = b + pStack[2] = r.a + for i >= 0 { + s := sStack[i] + p := pStack[2*i:] + if s > 0 { + // Split the quadratic curve p[:3] into an equivalent set of two shorter curves: + // p[:3] and p[2:5]. The new p[4] is the old p[2], and p[0] is unchanged. + mx := p[1].X + p[4].X = p[2].X + p[3].X = (p[4].X + mx) / 2 + p[1].X = (p[0].X + mx) / 2 + p[2].X = (p[1].X + p[3].X) / 2 + my := p[1].Y + p[4].Y = p[2].Y + p[3].Y = (p[4].Y + my) / 2 + p[1].Y = (p[0].Y + my) / 2 + p[2].Y = (p[1].Y + p[3].Y) / 2 + // The two shorter curves have one less split to do. + sStack[i] = s - 1 + sStack[i+1] = s - 1 + i++ + } else { + // Replace the level-0 quadratic with a two-linear-piece approximation. + midx := (p[0].X + 2*p[1].X + p[2].X) / 4 + midy := (p[0].Y + 2*p[1].Y + p[2].Y) / 4 + r.Add1(Point{midx, midy}) + r.Add1(p[0]) + i-- + } + } +} + +// Add3 adds a cubic segment to the current curve. +func (r *Rasterizer) Add3(b, c, d Point) { + // Calculate nSplit (the number of recursive decompositions) based on how `curvy' it is. + dev2 := maxAbs(r.a.X-3*(b.X+c.X)+d.X, r.a.Y-3*(b.Y+c.Y)+d.Y) / Fix32(r.splitScale2) + dev3 := maxAbs(r.a.X-2*b.X+d.X, r.a.Y-2*b.Y+d.Y) / Fix32(r.splitScale3) + nsplit := 0 + for dev2 > 0 || dev3 > 0 { + dev2 /= 8 + dev3 /= 4 + nsplit++ + } + // devN is 32-bit, and nsplit++ every time we shift off 2 bits, so maxNsplit is 16. 
+ const maxNsplit = 16 + if nsplit > maxNsplit { + panic("freetype/raster: Add3 nsplit too large: " + strconv.Itoa(nsplit)) + } + // Recursively decompose the curve nSplit levels deep. + var ( + pStack [3*maxNsplit + 4]Point + sStack [maxNsplit + 1]int + i int + ) + sStack[0] = nsplit + pStack[0] = d + pStack[1] = c + pStack[2] = b + pStack[3] = r.a + for i >= 0 { + s := sStack[i] + p := pStack[3*i:] + if s > 0 { + // Split the cubic curve p[:4] into an equivalent set of two shorter curves: + // p[:4] and p[3:7]. The new p[6] is the old p[3], and p[0] is unchanged. + m01x := (p[0].X + p[1].X) / 2 + m12x := (p[1].X + p[2].X) / 2 + m23x := (p[2].X + p[3].X) / 2 + p[6].X = p[3].X + p[5].X = m23x + p[1].X = m01x + p[2].X = (m01x + m12x) / 2 + p[4].X = (m12x + m23x) / 2 + p[3].X = (p[2].X + p[4].X) / 2 + m01y := (p[0].Y + p[1].Y) / 2 + m12y := (p[1].Y + p[2].Y) / 2 + m23y := (p[2].Y + p[3].Y) / 2 + p[6].Y = p[3].Y + p[5].Y = m23y + p[1].Y = m01y + p[2].Y = (m01y + m12y) / 2 + p[4].Y = (m12y + m23y) / 2 + p[3].Y = (p[2].Y + p[4].Y) / 2 + // The two shorter curves have one less split to do. + sStack[i] = s - 1 + sStack[i+1] = s - 1 + i++ + } else { + // Replace the level-0 cubic with a two-linear-piece approximation. + midx := (p[0].X + 3*(p[1].X+p[2].X) + p[3].X) / 8 + midy := (p[0].Y + 3*(p[1].Y+p[2].Y) + p[3].Y) / 8 + r.Add1(Point{midx, midy}) + r.Add1(p[0]) + i-- + } + } +} + +// AddPath adds the given Path. +func (r *Rasterizer) AddPath(p Path) { + for i := 0; i < len(p); { + switch p[i] { + case 0: + r.Start(Point{p[i+1], p[i+2]}) + i += 4 + case 1: + r.Add1(Point{p[i+1], p[i+2]}) + i += 4 + case 2: + r.Add2(Point{p[i+1], p[i+2]}, Point{p[i+3], p[i+4]}) + i += 6 + case 3: + r.Add3(Point{p[i+1], p[i+2]}, Point{p[i+3], p[i+4]}, Point{p[i+5], p[i+6]}) + i += 8 + default: + panic("freetype/raster: bad path") + } + } +} + +// AddStroke adds a stroked Path. 
+func (r *Rasterizer) AddStroke(q Path, width Fix32, cr Capper, jr Joiner) { + Stroke(r, q, width, cr, jr) +} + +// Converts an area value to a uint32 alpha value. A completely filled pixel +// corresponds to an area of 256*256*2, and an alpha of 1<<32-1. The +// conversion of area values greater than this depends on the winding rule: +// even-odd or non-zero. +func (r *Rasterizer) areaToAlpha(area int) uint32 { + // The C Freetype implementation (version 2.3.12) does "alpha := area>>1" without + // the +1. Round-to-nearest gives a more symmetric result than round-down. + // The C implementation also returns 8-bit alpha, not 32-bit alpha. + a := (area + 1) >> 1 + if a < 0 { + a = -a + } + alpha := uint32(a) + if r.UseNonZeroWinding { + if alpha > 0xffff { + alpha = 0xffff + } + } else { + alpha &= 0x1ffff + if alpha > 0x10000 { + alpha = 0x20000 - alpha + } else if alpha == 0x10000 { + alpha = 0x0ffff + } + } + alpha |= alpha << 16 + return alpha +} + +// Rasterize converts r's accumulated curves into Spans for p. The Spans +// passed to p are non-overlapping, and sorted by Y and then X. They all +// have non-zero width (and 0 <= X0 < X1 <= r.width) and non-zero A, except +// for the final Span, which has Y, X0, X1 and A all equal to zero. 
+func (r *Rasterizer) Rasterize(p Painter) { + r.saveCell() + s := 0 + for yi := 0; yi < len(r.cellIndex); yi++ { + xi, cover := 0, 0 + for c := r.cellIndex[yi]; c != -1; c = r.cell[c].next { + if cover != 0 && r.cell[c].xi > xi { + alpha := r.areaToAlpha(cover * 256 * 2) + if alpha != 0 { + xi0, xi1 := xi, r.cell[c].xi + if xi0 < 0 { + xi0 = 0 + } + if xi1 >= r.width { + xi1 = r.width + } + if xi0 < xi1 { + r.spanBuf[s] = Span{yi + r.Dy, xi0 + r.Dx, xi1 + r.Dx, alpha} + s++ + } + } + } + cover += r.cell[c].cover + alpha := r.areaToAlpha(cover*256*2 - r.cell[c].area) + xi = r.cell[c].xi + 1 + if alpha != 0 { + xi0, xi1 := r.cell[c].xi, xi + if xi0 < 0 { + xi0 = 0 + } + if xi1 >= r.width { + xi1 = r.width + } + if xi0 < xi1 { + r.spanBuf[s] = Span{yi + r.Dy, xi0 + r.Dx, xi1 + r.Dx, alpha} + s++ + } + } + if s > len(r.spanBuf)-2 { + p.Paint(r.spanBuf[:s], false) + s = 0 + } + } + } + p.Paint(r.spanBuf[:s], true) +} + +// Clear cancels any previous calls to r.Start or r.AddXxx. +func (r *Rasterizer) Clear() { + r.a = Point{} + r.xi = 0 + r.yi = 0 + r.area = 0 + r.cover = 0 + r.cell = r.cell[:0] + for i := 0; i < len(r.cellIndex); i++ { + r.cellIndex[i] = -1 + } +} + +// SetBounds sets the maximum width and height of the rasterized image and +// calls Clear. The width and height are in pixels, not Fix32 units. +func (r *Rasterizer) SetBounds(width, height int) { + if width < 0 { + width = 0 + } + if height < 0 { + height = 0 + } + // Use the same ssN heuristic as the C Freetype implementation. + // The C implementation uses the values 32, 16, but those are in + // 26.6 fixed point units, and we use 24.8 fixed point everywhere. 
+ ss2, ss3 := 128, 64 + if width > 24 || height > 24 { + ss2, ss3 = 2*ss2, 2*ss3 + if width > 120 || height > 120 { + ss2, ss3 = 2*ss2, 2*ss3 + } + } + r.width = width + r.splitScale2 = ss2 + r.splitScale3 = ss3 + r.cell = r.cellBuf[:0] + if height > len(r.cellIndexBuf) { + r.cellIndex = make([]int, height) + } else { + r.cellIndex = r.cellIndexBuf[:height] + } + r.Clear() +} + +// NewRasterizer creates a new Rasterizer with the given bounds. +func NewRasterizer(width, height int) *Rasterizer { + r := new(Rasterizer) + r.SetBounds(width, height) + return r +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/stroke.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/stroke.go new file mode 100644 index 000000000..d49b1cee9 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/raster/stroke.go @@ -0,0 +1,466 @@ +// Copyright 2010 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package raster + +// Two points are considered practically equal if the square of the distance +// between them is less than one quarter (i.e. 16384 / 65536 in Fix64). +const epsilon = 16384 + +// A Capper signifies how to begin or end a stroked path. +type Capper interface { + // Cap adds a cap to p given a pivot point and the normal vector of a + // terminal segment. The normal's length is half of the stroke width. + Cap(p Adder, halfWidth Fix32, pivot, n1 Point) +} + +// The CapperFunc type adapts an ordinary function to be a Capper. +type CapperFunc func(Adder, Fix32, Point, Point) + +func (f CapperFunc) Cap(p Adder, halfWidth Fix32, pivot, n1 Point) { + f(p, halfWidth, pivot, n1) +} + +// A Joiner signifies how to join interior nodes of a stroked path. 
+type Joiner interface {
+	// Join adds a join to the two sides of a stroked path given a pivot
+	// point and the normal vectors of the trailing and leading segments.
+	// Both normals have length equal to half of the stroke width.
+	Join(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point)
+}
+
+// The JoinerFunc type adapts an ordinary function to be a Joiner.
+type JoinerFunc func(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point)
+
+func (f JoinerFunc) Join(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point) {
+	f(lhs, rhs, halfWidth, pivot, n0, n1)
+}
+
+// RoundCapper adds round caps to a stroked path.
+var RoundCapper Capper = CapperFunc(roundCapper)
+
+func roundCapper(p Adder, halfWidth Fix32, pivot, n1 Point) {
+	// The cubic Bézier approximation to a circle involves the magic number
+	// (√2 - 1) * 4/3, which is approximately 141/256.
+	const k = 141
+	e := n1.Rot90CCW()
+	side := pivot.Add(e)
+	start, end := pivot.Sub(n1), pivot.Add(n1)
+	d, e := n1.Mul(k), e.Mul(k)
+	p.Add3(start.Add(e), side.Sub(d), side)
+	p.Add3(side.Add(d), end.Add(e), end)
+}
+
+// ButtCapper adds butt caps to a stroked path.
+var ButtCapper Capper = CapperFunc(buttCapper)
+
+func buttCapper(p Adder, halfWidth Fix32, pivot, n1 Point) {
+	p.Add1(pivot.Add(n1))
+}
+
+// SquareCapper adds square caps to a stroked path.
+var SquareCapper Capper = CapperFunc(squareCapper)
+
+func squareCapper(p Adder, halfWidth Fix32, pivot, n1 Point) {
+	e := n1.Rot90CCW()
+	side := pivot.Add(e)
+	p.Add1(side.Sub(n1))
+	p.Add1(side.Add(n1))
+	p.Add1(pivot.Add(n1))
+}
+
+// RoundJoiner adds round joins to a stroked path.
+var RoundJoiner Joiner = JoinerFunc(roundJoiner)
+
+func roundJoiner(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point) {
+	dot := n0.Rot90CW().Dot(n1)
+	if dot >= 0 {
+		addArc(lhs, pivot, n0, n1)
+		rhs.Add1(pivot.Sub(n1))
+	} else {
+		lhs.Add1(pivot.Add(n1))
+		addArc(rhs, pivot, n0.Neg(), n1.Neg())
+	}
+}
+
+// BevelJoiner adds bevel joins to a stroked path.
+var BevelJoiner Joiner = JoinerFunc(bevelJoiner)
+
+func bevelJoiner(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point) {
+	lhs.Add1(pivot.Add(n1))
+	rhs.Add1(pivot.Sub(n1))
+}
+
+// addArc adds a circular arc from pivot+n0 to pivot+n1 to p. The shorter of
+// the two possible arcs is taken, i.e. the one spanning <= 180 degrees.
+// The two vectors n0 and n1 must be of equal length.
+func addArc(p Adder, pivot, n0, n1 Point) {
+	// r2 is the square of the length of n0.
+	r2 := n0.Dot(n0)
+	if r2 < epsilon {
+		// The arc radius is so small that we collapse to a straight line.
+		p.Add1(pivot.Add(n1))
+		return
+	}
+	// We approximate the arc by 0, 1, 2 or 3 45-degree quadratic segments plus
+	// a final quadratic segment from s to n1. Each 45-degree segment has control
+	// points {1, 0}, {1, tan(π/8)} and {1/√2, 1/√2} suitably scaled, rotated and
+	// translated. tan(π/8) is approximately 106/256.
+	const tpo8 = 106
+	var s Point
+	// We determine which octant the angle between n0 and n1 is in via three dot products.
+	// m0, m1 and m2 are n0 rotated clockwise by 45, 90 and 135 degrees.
+	m0 := n0.Rot45CW()
+	m1 := n0.Rot90CW()
+	m2 := m0.Rot90CW()
+	if m1.Dot(n1) >= 0 {
+		if n0.Dot(n1) >= 0 {
+			if m2.Dot(n1) <= 0 {
+				// n1 is between 0 and 45 degrees clockwise of n0.
+				s = n0
+			} else {
+				// n1 is between 45 and 90 degrees clockwise of n0.
+				p.Add2(pivot.Add(n0).Add(m1.Mul(tpo8)), pivot.Add(m0))
+				s = m0
+			}
+		} else {
+			pm1, n0t := pivot.Add(m1), n0.Mul(tpo8)
+			p.Add2(pivot.Add(n0).Add(m1.Mul(tpo8)), pivot.Add(m0))
+			p.Add2(pm1.Add(n0t), pm1)
+			if m0.Dot(n1) >= 0 {
+				// n1 is between 90 and 135 degrees clockwise of n0.
+				s = m1
+			} else {
+				// n1 is between 135 and 180 degrees clockwise of n0.
+				p.Add2(pm1.Sub(n0t), pivot.Add(m2))
+				s = m2
+			}
+		}
+	} else {
+		if n0.Dot(n1) >= 0 {
+			if m0.Dot(n1) >= 0 {
+				// n1 is between 0 and 45 degrees counter-clockwise of n0.
+				s = n0
+			} else {
+				// n1 is between 45 and 90 degrees counter-clockwise of n0.
+ p.Add2(pivot.Add(n0).Sub(m1.Mul(tpo8)), pivot.Sub(m2)) + s = m2.Neg() + } + } else { + pm1, n0t := pivot.Sub(m1), n0.Mul(tpo8) + p.Add2(pivot.Add(n0).Sub(m1.Mul(tpo8)), pivot.Sub(m2)) + p.Add2(pm1.Add(n0t), pm1) + if m2.Dot(n1) <= 0 { + // n1 is between 90 and 135 degrees counter-clockwise of n0. + s = m1.Neg() + } else { + // n1 is between 135 and 180 degrees counter-clockwise of n0. + p.Add2(pm1.Sub(n0t), pivot.Sub(m0)) + s = m0.Neg() + } + } + } + // The final quadratic segment has two endpoints s and n1 and the middle + // control point is a multiple of s.Add(n1), i.e. it is on the angle bisector + // of those two points. The multiple ranges between 128/256 and 150/256 as + // the angle between s and n1 ranges between 0 and 45 degrees. + // When the angle is 0 degrees (i.e. s and n1 are coincident) then s.Add(n1) + // is twice s and so the middle control point of the degenerate quadratic + // segment should be half s.Add(n1), and half = 128/256. + // When the angle is 45 degrees then 150/256 is the ratio of the lengths of + // the two vectors {1, tan(π/8)} and {1 + 1/√2, 1/√2}. + // d is the normalized dot product between s and n1. Since the angle ranges + // between 0 and 45 degrees then d ranges between 256/256 and 181/256. + d := 256 * s.Dot(n1) / r2 + multiple := Fix32(150 - 22*(d-181)/(256-181)) + p.Add2(pivot.Add(s.Add(n1).Mul(multiple)), pivot.Add(n1)) +} + +// midpoint returns the midpoint of two Points. +func midpoint(a, b Point) Point { + return Point{(a.X + b.X) / 2, (a.Y + b.Y) / 2} +} + +// angleGreaterThan45 returns whether the angle between two vectors is more +// than 45 degrees. +func angleGreaterThan45(v0, v1 Point) bool { + v := v0.Rot45CCW() + return v.Dot(v1) < 0 || v.Rot90CW().Dot(v1) < 0 +} + +// interpolate returns the point (1-t)*a + t*b. 
+func interpolate(a, b Point, t Fix64) Point { + s := 65536 - t + x := s*Fix64(a.X) + t*Fix64(b.X) + y := s*Fix64(a.Y) + t*Fix64(b.Y) + return Point{Fix32(x >> 16), Fix32(y >> 16)} +} + +// curviest2 returns the value of t for which the quadratic parametric curve +// (1-t)²*a + 2*t*(1-t).b + t²*c has maximum curvature. +// +// The curvature of the parametric curve f(t) = (x(t), y(t)) is +// |x′y″-y′x″| / (x′²+y′²)^(3/2). +// +// Let d = b-a and e = c-2*b+a, so that f′(t) = 2*d+2*e*t and f″(t) = 2*e. +// The curvature's numerator is (2*dx+2*ex*t)*(2*ey)-(2*dy+2*ey*t)*(2*ex), +// which simplifies to 4*dx*ey-4*dy*ex, which is constant with respect to t. +// +// Thus, curvature is extreme where the denominator is extreme, i.e. where +// (x′²+y′²) is extreme. The first order condition is that +// 2*x′*x″+2*y′*y″ = 0, or (dx+ex*t)*ex + (dy+ey*t)*ey = 0. +// Solving for t gives t = -(dx*ex+dy*ey) / (ex*ex+ey*ey). +func curviest2(a, b, c Point) Fix64 { + dx := int64(b.X - a.X) + dy := int64(b.Y - a.Y) + ex := int64(c.X - 2*b.X + a.X) + ey := int64(c.Y - 2*b.Y + a.Y) + if ex == 0 && ey == 0 { + return 32768 + } + return Fix64(-65536 * (dx*ex + dy*ey) / (ex*ex + ey*ey)) +} + +// A stroker holds state for stroking a path. +type stroker struct { + // p is the destination that records the stroked path. + p Adder + // u is the half-width of the stroke. + u Fix32 + // cr and jr specify how to end and connect path segments. + cr Capper + jr Joiner + // r is the reverse path. Stroking a path involves constructing two + // parallel paths 2*u apart. The first path is added immediately to p, + // the second path is accumulated in r and eventually added in reverse. + r Path + // a is the most recent segment point. anorm is the segment normal of + // length u at that point. + a, anorm Point +} + +// addNonCurvy2 adds a quadratic segment to the stroker, where the segment +// defined by (k.a, b, c) achieves maximum curvature at either k.a or c. 
+func (k *stroker) addNonCurvy2(b, c Point) { + // We repeatedly divide the segment at its middle until it is straight + // enough to approximate the stroke by just translating the control points. + // ds and ps are stacks of depths and points. t is the top of the stack. + const maxDepth = 5 + var ( + ds [maxDepth + 1]int + ps [2*maxDepth + 3]Point + t int + ) + // Initially the ps stack has one quadratic segment of depth zero. + ds[0] = 0 + ps[2] = k.a + ps[1] = b + ps[0] = c + anorm := k.anorm + var cnorm Point + + for { + depth := ds[t] + a := ps[2*t+2] + b := ps[2*t+1] + c := ps[2*t+0] + ab := b.Sub(a) + bc := c.Sub(b) + abIsSmall := ab.Dot(ab) < Fix64(1<<16) + bcIsSmall := bc.Dot(bc) < Fix64(1<<16) + if abIsSmall && bcIsSmall { + // Approximate the segment by a circular arc. + cnorm = bc.Norm(k.u).Rot90CCW() + mac := midpoint(a, c) + addArc(k.p, mac, anorm, cnorm) + addArc(&k.r, mac, anorm.Neg(), cnorm.Neg()) + } else if depth < maxDepth && angleGreaterThan45(ab, bc) { + // Divide the segment in two and push both halves on the stack. + mab := midpoint(a, b) + mbc := midpoint(b, c) + t++ + ds[t+0] = depth + 1 + ds[t-1] = depth + 1 + ps[2*t+2] = a + ps[2*t+1] = mab + ps[2*t+0] = midpoint(mab, mbc) + ps[2*t-1] = mbc + continue + } else { + // Translate the control points. + bnorm := c.Sub(a).Norm(k.u).Rot90CCW() + cnorm = bc.Norm(k.u).Rot90CCW() + k.p.Add2(b.Add(bnorm), c.Add(cnorm)) + k.r.Add2(b.Sub(bnorm), c.Sub(cnorm)) + } + if t == 0 { + k.a, k.anorm = c, cnorm + return + } + t-- + anorm = cnorm + } + panic("unreachable") +} + +// Add1 adds a linear segment to the stroker. +func (k *stroker) Add1(b Point) { + bnorm := b.Sub(k.a).Norm(k.u).Rot90CCW() + if len(k.r) == 0 { + k.p.Start(k.a.Add(bnorm)) + k.r.Start(k.a.Sub(bnorm)) + } else { + k.jr.Join(k.p, &k.r, k.u, k.a, k.anorm, bnorm) + } + k.p.Add1(b.Add(bnorm)) + k.r.Add1(b.Sub(bnorm)) + k.a, k.anorm = b, bnorm +} + +// Add2 adds a quadratic segment to the stroker. 
+func (k *stroker) Add2(b, c Point) { + ab := b.Sub(k.a) + bc := c.Sub(b) + abnorm := ab.Norm(k.u).Rot90CCW() + if len(k.r) == 0 { + k.p.Start(k.a.Add(abnorm)) + k.r.Start(k.a.Sub(abnorm)) + } else { + k.jr.Join(k.p, &k.r, k.u, k.a, k.anorm, abnorm) + } + + // Approximate nearly-degenerate quadratics by linear segments. + abIsSmall := ab.Dot(ab) < epsilon + bcIsSmall := bc.Dot(bc) < epsilon + if abIsSmall || bcIsSmall { + acnorm := c.Sub(k.a).Norm(k.u).Rot90CCW() + k.p.Add1(c.Add(acnorm)) + k.r.Add1(c.Sub(acnorm)) + k.a, k.anorm = c, acnorm + return + } + + // The quadratic segment (k.a, b, c) has a point of maximum curvature. + // If this occurs at an end point, we process the segment as a whole. + t := curviest2(k.a, b, c) + if t <= 0 || t >= 65536 { + k.addNonCurvy2(b, c) + return + } + + // Otherwise, we perform a de Casteljau decomposition at the point of + // maximum curvature and process the two straighter parts. + mab := interpolate(k.a, b, t) + mbc := interpolate(b, c, t) + mabc := interpolate(mab, mbc, t) + + // If the vectors ab and bc are close to being in opposite directions, + // then the decomposition can become unstable, so we approximate the + // quadratic segment by two linear segments joined by an arc. + bcnorm := bc.Norm(k.u).Rot90CCW() + if abnorm.Dot(bcnorm) < -Fix64(k.u)*Fix64(k.u)*2047/2048 { + pArc := abnorm.Dot(bc) < 0 + + k.p.Add1(mabc.Add(abnorm)) + if pArc { + z := abnorm.Rot90CW() + addArc(k.p, mabc, abnorm, z) + addArc(k.p, mabc, z, bcnorm) + } + k.p.Add1(mabc.Add(bcnorm)) + k.p.Add1(c.Add(bcnorm)) + + k.r.Add1(mabc.Sub(abnorm)) + if !pArc { + z := abnorm.Rot90CW() + addArc(&k.r, mabc, abnorm.Neg(), z) + addArc(&k.r, mabc, z, bcnorm.Neg()) + } + k.r.Add1(mabc.Sub(bcnorm)) + k.r.Add1(c.Sub(bcnorm)) + + k.a, k.anorm = c, bcnorm + return + } + + // Process the decomposed parts. + k.addNonCurvy2(mab, mabc) + k.addNonCurvy2(mbc, c) +} + +// Add3 adds a cubic segment to the stroker. 
+func (k *stroker) Add3(b, c, d Point) { + panic("freetype/raster: stroke unimplemented for cubic segments") +} + +// stroke adds the stroked Path q to p, where q consists of exactly one curve. +func (k *stroker) stroke(q Path) { + // Stroking is implemented by deriving two paths each k.u apart from q. + // The left-hand-side path is added immediately to k.p; the right-hand-side + // path is accumulated in k.r. Once we've finished adding the LHS to k.p, + // we add the RHS in reverse order. + k.r = make(Path, 0, len(q)) + k.a = Point{q[1], q[2]} + for i := 4; i < len(q); { + switch q[i] { + case 1: + k.Add1(Point{q[i+1], q[i+2]}) + i += 4 + case 2: + k.Add2(Point{q[i+1], q[i+2]}, Point{q[i+3], q[i+4]}) + i += 6 + case 3: + k.Add3(Point{q[i+1], q[i+2]}, Point{q[i+3], q[i+4]}, Point{q[i+5], q[i+6]}) + i += 8 + default: + panic("freetype/raster: bad path") + } + } + if len(k.r) == 0 { + return + } + // TODO(nigeltao): if q is a closed curve then we should join the first and + // last segments instead of capping them. + k.cr.Cap(k.p, k.u, q.lastPoint(), k.anorm.Neg()) + addPathReversed(k.p, k.r) + pivot := q.firstPoint() + k.cr.Cap(k.p, k.u, pivot, pivot.Sub(Point{k.r[1], k.r[2]})) +} + +// Stroke adds q stroked with the given width to p. The result is typically +// self-intersecting and should be rasterized with UseNonZeroWinding. +// cr and jr may be nil, which defaults to a RoundCapper or RoundJoiner. 
+func Stroke(p Adder, q Path, width Fix32, cr Capper, jr Joiner) { + if len(q) == 0 { + return + } + if cr == nil { + cr = RoundCapper + } + if jr == nil { + jr = RoundJoiner + } + if q[0] != 0 { + panic("freetype/raster: bad path") + } + s := stroker{p: p, u: width / 2, cr: cr, jr: jr} + i := 0 + for j := 4; j < len(q); { + switch q[j] { + case 0: + s.stroke(q[i:j]) + i, j = j, j+4 + case 1: + j += 4 + case 2: + j += 6 + case 3: + j += 8 + default: + panic("freetype/raster: bad path") + } + } + s.stroke(q[i:]) +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/glyph.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/glyph.go new file mode 100644 index 000000000..b5f327851 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/glyph.go @@ -0,0 +1,530 @@ +// Copyright 2010 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package truetype + +// Hinting is the policy for snapping a glyph's contours to pixel boundaries. +type Hinting int32 + +const ( + // NoHinting means to not perform any hinting. + NoHinting Hinting = iota + // FullHinting means to use the font's hinting instructions. + FullHinting + + // TODO: implement VerticalHinting. +) + +// A Point is a co-ordinate pair plus whether it is ``on'' a contour or an +// ``off'' control point. +type Point struct { + X, Y int32 + // The Flags' LSB means whether or not this Point is ``on'' the contour. + // Other bits are reserved for internal use. + Flags uint32 +} + +// A GlyphBuf holds a glyph's contours. A GlyphBuf can be re-used to load a +// series of glyphs from a Font. +type GlyphBuf struct { + // AdvanceWidth is the glyph's advance width. + AdvanceWidth int32 + // B is the glyph's bounding box. 
+ B Bounds + // Point contains all Points from all contours of the glyph. If + // hinting was used to load a glyph then Unhinted contains those + // Points before they were hinted, and InFontUnits contains those + // Points before they were hinted and scaled. + Point, Unhinted, InFontUnits []Point + // End is the point indexes of the end point of each countour. The + // length of End is the number of contours in the glyph. The i'th + // contour consists of points Point[End[i-1]:End[i]], where End[-1] + // is interpreted to mean zero. + End []int + + font *Font + scale int32 + hinting Hinting + hinter hinter + // phantomPoints are the co-ordinates of the synthetic phantom points + // used for hinting and bounding box calculations. + phantomPoints [4]Point + // pp1x is the X co-ordinate of the first phantom point. The '1' is + // using 1-based indexing; pp1x is almost always phantomPoints[0].X. + // TODO: eliminate this and consistently use phantomPoints[0].X. + pp1x int32 + // metricsSet is whether the glyph's metrics have been set yet. For a + // compound glyph, a sub-glyph may override the outer glyph's metrics. + metricsSet bool + // tmp is a scratch buffer. + tmp []Point +} + +// Flags for decoding a glyph's contours. These flags are documented at +// http://developer.apple.com/fonts/TTRefMan/RM06/Chap6glyf.html. +const ( + flagOnCurve = 1 << iota + flagXShortVector + flagYShortVector + flagRepeat + flagPositiveXShortVector + flagPositiveYShortVector + + // The remaining flags are for internal use. + flagTouchedX + flagTouchedY +) + +// The same flag bits (0x10 and 0x20) are overloaded to have two meanings, +// dependent on the value of the flag{X,Y}ShortVector bits. +const ( + flagThisXIsSame = flagPositiveXShortVector + flagThisYIsSame = flagPositiveYShortVector +) + +// Load loads a glyph's contours from a Font, overwriting any previously +// loaded contours for this GlyphBuf. 
scale is the number of 26.6 fixed point +// units in 1 em, i is the glyph index, and h is the hinting policy. +func (g *GlyphBuf) Load(f *Font, scale int32, i Index, h Hinting) error { + g.Point = g.Point[:0] + g.Unhinted = g.Unhinted[:0] + g.InFontUnits = g.InFontUnits[:0] + g.End = g.End[:0] + g.font = f + g.hinting = h + g.scale = scale + g.pp1x = 0 + g.phantomPoints = [4]Point{} + g.metricsSet = false + + if h != NoHinting { + if err := g.hinter.init(f, scale); err != nil { + return err + } + } + if err := g.load(0, i, true); err != nil { + return err + } + // TODO: this selection of either g.pp1x or g.phantomPoints[0].X isn't ideal, + // and should be cleaned up once we have all the testScaling tests passing, + // plus additional tests for Freetype-Go's bounding boxes matching C Freetype's. + pp1x := g.pp1x + if h != NoHinting { + pp1x = g.phantomPoints[0].X + } + if pp1x != 0 { + for i := range g.Point { + g.Point[i].X -= pp1x + } + } + + advanceWidth := g.phantomPoints[1].X - g.phantomPoints[0].X + if h != NoHinting { + if len(f.hdmx) >= 8 { + if n := u32(f.hdmx, 4); n > 3+uint32(i) { + for hdmx := f.hdmx[8:]; uint32(len(hdmx)) >= n; hdmx = hdmx[n:] { + if int32(hdmx[0]) == scale>>6 { + advanceWidth = int32(hdmx[2+i]) << 6 + break + } + } + } + } + advanceWidth = (advanceWidth + 32) &^ 63 + } + g.AdvanceWidth = advanceWidth + + // Set g.B to the 'control box', which is the bounding box of the Bézier + // curves' control points. This is easier to calculate, no smaller than + // and often equal to the tightest possible bounding box of the curves + // themselves. This approach is what C Freetype does. We can't just scale + // the nominal bounding box in the glyf data as the hinting process and + // phantom point adjustment may move points outside of that box. 
+ if len(g.Point) == 0 { + g.B = Bounds{} + } else { + p := g.Point[0] + g.B.XMin = p.X + g.B.XMax = p.X + g.B.YMin = p.Y + g.B.YMax = p.Y + for _, p := range g.Point[1:] { + if g.B.XMin > p.X { + g.B.XMin = p.X + } else if g.B.XMax < p.X { + g.B.XMax = p.X + } + if g.B.YMin > p.Y { + g.B.YMin = p.Y + } else if g.B.YMax < p.Y { + g.B.YMax = p.Y + } + } + // Snap the box to the grid, if hinting is on. + if h != NoHinting { + g.B.XMin &^= 63 + g.B.YMin &^= 63 + g.B.XMax += 63 + g.B.XMax &^= 63 + g.B.YMax += 63 + g.B.YMax &^= 63 + } + } + return nil +} + +func (g *GlyphBuf) load(recursion int32, i Index, useMyMetrics bool) (err error) { + // The recursion limit here is arbitrary, but defends against malformed glyphs. + if recursion >= 32 { + return UnsupportedError("excessive compound glyph recursion") + } + // Find the relevant slice of g.font.glyf. + var g0, g1 uint32 + if g.font.locaOffsetFormat == locaOffsetFormatShort { + g0 = 2 * uint32(u16(g.font.loca, 2*int(i))) + g1 = 2 * uint32(u16(g.font.loca, 2*int(i)+2)) + } else { + g0 = u32(g.font.loca, 4*int(i)) + g1 = u32(g.font.loca, 4*int(i)+4) + } + + // Decode the contour count and nominal bounding box, from the first + // 10 bytes of the glyf data. boundsYMin and boundsXMax, at offsets 4 + // and 6, are unused. + glyf, ne, boundsXMin, boundsYMax := []byte(nil), 0, int32(0), int32(0) + if g0+10 <= g1 { + glyf = g.font.glyf[g0:g1] + ne = int(int16(u16(glyf, 0))) + boundsXMin = int32(int16(u16(glyf, 2))) + boundsYMax = int32(int16(u16(glyf, 8))) + } + + // Create the phantom points. 
+ uhm, pp1x := g.font.unscaledHMetric(i), int32(0) + uvm := g.font.unscaledVMetric(i, boundsYMax) + g.phantomPoints = [4]Point{ + {X: boundsXMin - uhm.LeftSideBearing}, + {X: boundsXMin - uhm.LeftSideBearing + uhm.AdvanceWidth}, + {X: uhm.AdvanceWidth / 2, Y: boundsYMax + uvm.TopSideBearing}, + {X: uhm.AdvanceWidth / 2, Y: boundsYMax + uvm.TopSideBearing - uvm.AdvanceHeight}, + } + if len(glyf) == 0 { + g.addPhantomsAndScale(len(g.Point), len(g.Point), true, true) + copy(g.phantomPoints[:], g.Point[len(g.Point)-4:]) + g.Point = g.Point[:len(g.Point)-4] + return nil + } + + // Load and hint the contours. + if ne < 0 { + if ne != -1 { + // http://developer.apple.com/fonts/TTRefMan/RM06/Chap6glyf.html says that + // "the values -2, -3, and so forth, are reserved for future use." + return UnsupportedError("negative number of contours") + } + pp1x = g.font.scale(g.scale * (boundsXMin - uhm.LeftSideBearing)) + if err := g.loadCompound(recursion, uhm, i, glyf, useMyMetrics); err != nil { + return err + } + } else { + np0, ne0 := len(g.Point), len(g.End) + program := g.loadSimple(glyf, ne) + g.addPhantomsAndScale(np0, np0, true, true) + pp1x = g.Point[len(g.Point)-4].X + if g.hinting != NoHinting { + if len(program) != 0 { + err := g.hinter.run( + program, + g.Point[np0:], + g.Unhinted[np0:], + g.InFontUnits[np0:], + g.End[ne0:], + ) + if err != nil { + return err + } + } + // Drop the four phantom points. + g.InFontUnits = g.InFontUnits[:len(g.InFontUnits)-4] + g.Unhinted = g.Unhinted[:len(g.Unhinted)-4] + } + if useMyMetrics { + copy(g.phantomPoints[:], g.Point[len(g.Point)-4:]) + } + g.Point = g.Point[:len(g.Point)-4] + if np0 != 0 { + // The hinting program expects the []End values to be indexed relative + // to the inner glyph, not the outer glyph, so we delay adding np0 until + // after the hinting program (if any) has run. 
+ for i := ne0; i < len(g.End); i++ { + g.End[i] += np0 + } + } + } + if useMyMetrics && !g.metricsSet { + g.metricsSet = true + g.pp1x = pp1x + } + return nil +} + +// loadOffset is the initial offset for loadSimple and loadCompound. The first +// 10 bytes are the number of contours and the bounding box. +const loadOffset = 10 + +func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) { + offset := loadOffset + for i := 0; i < ne; i++ { + g.End = append(g.End, 1+int(u16(glyf, offset))) + offset += 2 + } + + // Note the TrueType hinting instructions. + instrLen := int(u16(glyf, offset)) + offset += 2 + program = glyf[offset : offset+instrLen] + offset += instrLen + + np0 := len(g.Point) + np1 := np0 + int(g.End[len(g.End)-1]) + + // Decode the flags. + for i := np0; i < np1; { + c := uint32(glyf[offset]) + offset++ + g.Point = append(g.Point, Point{Flags: c}) + i++ + if c&flagRepeat != 0 { + count := glyf[offset] + offset++ + for ; count > 0; count-- { + g.Point = append(g.Point, Point{Flags: c}) + i++ + } + } + } + + // Decode the co-ordinates. + var x int16 + for i := np0; i < np1; i++ { + f := g.Point[i].Flags + if f&flagXShortVector != 0 { + dx := int16(glyf[offset]) + offset++ + if f&flagPositiveXShortVector == 0 { + x -= dx + } else { + x += dx + } + } else if f&flagThisXIsSame == 0 { + x += int16(u16(glyf, offset)) + offset += 2 + } + g.Point[i].X = int32(x) + } + var y int16 + for i := np0; i < np1; i++ { + f := g.Point[i].Flags + if f&flagYShortVector != 0 { + dy := int16(glyf[offset]) + offset++ + if f&flagPositiveYShortVector == 0 { + y -= dy + } else { + y += dy + } + } else if f&flagThisYIsSame == 0 { + y += int16(u16(glyf, offset)) + offset += 2 + } + g.Point[i].Y = int32(y) + } + + return program +} + +func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, + glyf []byte, useMyMetrics bool) error { + + // Flags for decoding a compound glyph. 
These flags are documented at + // http://developer.apple.com/fonts/TTRefMan/RM06/Chap6glyf.html. + const ( + flagArg1And2AreWords = 1 << iota + flagArgsAreXYValues + flagRoundXYToGrid + flagWeHaveAScale + flagUnused + flagMoreComponents + flagWeHaveAnXAndYScale + flagWeHaveATwoByTwo + flagWeHaveInstructions + flagUseMyMetrics + flagOverlapCompound + ) + np0, ne0 := len(g.Point), len(g.End) + offset := loadOffset + for { + flags := u16(glyf, offset) + component := Index(u16(glyf, offset+2)) + dx, dy, transform, hasTransform := int32(0), int32(0), [4]int32{}, false + if flags&flagArg1And2AreWords != 0 { + dx = int32(int16(u16(glyf, offset+4))) + dy = int32(int16(u16(glyf, offset+6))) + offset += 8 + } else { + dx = int32(int16(int8(glyf[offset+4]))) + dy = int32(int16(int8(glyf[offset+5]))) + offset += 6 + } + if flags&flagArgsAreXYValues == 0 { + return UnsupportedError("compound glyph transform vector") + } + if flags&(flagWeHaveAScale|flagWeHaveAnXAndYScale|flagWeHaveATwoByTwo) != 0 { + hasTransform = true + switch { + case flags&flagWeHaveAScale != 0: + transform[0] = int32(int16(u16(glyf, offset+0))) + transform[3] = transform[0] + offset += 2 + case flags&flagWeHaveAnXAndYScale != 0: + transform[0] = int32(int16(u16(glyf, offset+0))) + transform[3] = int32(int16(u16(glyf, offset+2))) + offset += 4 + case flags&flagWeHaveATwoByTwo != 0: + transform[0] = int32(int16(u16(glyf, offset+0))) + transform[1] = int32(int16(u16(glyf, offset+2))) + transform[2] = int32(int16(u16(glyf, offset+4))) + transform[3] = int32(int16(u16(glyf, offset+6))) + offset += 8 + } + } + savedPP := g.phantomPoints + np0 := len(g.Point) + componentUMM := useMyMetrics && (flags&flagUseMyMetrics != 0) + if err := g.load(recursion+1, component, componentUMM); err != nil { + return err + } + if flags&flagUseMyMetrics == 0 { + g.phantomPoints = savedPP + } + if hasTransform { + for j := np0; j < len(g.Point); j++ { + p := &g.Point[j] + newX := int32((int64(p.X)*int64(transform[0])+1<<13)>>14) + 
+ int32((int64(p.Y)*int64(transform[2])+1<<13)>>14) + newY := int32((int64(p.X)*int64(transform[1])+1<<13)>>14) + + int32((int64(p.Y)*int64(transform[3])+1<<13)>>14) + p.X, p.Y = newX, newY + } + } + dx = g.font.scale(g.scale * dx) + dy = g.font.scale(g.scale * dy) + if flags&flagRoundXYToGrid != 0 { + dx = (dx + 32) &^ 63 + dy = (dy + 32) &^ 63 + } + for j := np0; j < len(g.Point); j++ { + p := &g.Point[j] + p.X += dx + p.Y += dy + } + // TODO: also adjust g.InFontUnits and g.Unhinted? + if flags&flagMoreComponents == 0 { + break + } + } + + instrLen := 0 + if g.hinting != NoHinting && offset+2 <= len(glyf) { + instrLen = int(u16(glyf, offset)) + offset += 2 + } + + g.addPhantomsAndScale(np0, len(g.Point), false, instrLen > 0) + points, ends := g.Point[np0:], g.End[ne0:] + g.Point = g.Point[:len(g.Point)-4] + for j := range points { + points[j].Flags &^= flagTouchedX | flagTouchedY + } + + if instrLen == 0 { + if !g.metricsSet { + copy(g.phantomPoints[:], points[len(points)-4:]) + } + return nil + } + + // Hint the compound glyph. + program := glyf[offset : offset+instrLen] + // Temporarily adjust the ends to be relative to this compound glyph. + if np0 != 0 { + for i := range ends { + ends[i] -= np0 + } + } + // Hinting instructions of a composite glyph completely refer to the + // (already) hinted subglyphs. + g.tmp = append(g.tmp[:0], points...) + if err := g.hinter.run(program, points, g.tmp, g.tmp, ends); err != nil { + return err + } + if np0 != 0 { + for i := range ends { + ends[i] += np0 + } + } + if !g.metricsSet { + copy(g.phantomPoints[:], points[len(points)-4:]) + } + return nil +} + +func (g *GlyphBuf) addPhantomsAndScale(np0, np1 int, simple, adjust bool) { + // Add the four phantom points. + g.Point = append(g.Point, g.phantomPoints[:]...) + // Scale the points. + if simple && g.hinting != NoHinting { + g.InFontUnits = append(g.InFontUnits, g.Point[np1:]...) 
+ } + for i := np1; i < len(g.Point); i++ { + p := &g.Point[i] + p.X = g.font.scale(g.scale * p.X) + p.Y = g.font.scale(g.scale * p.Y) + } + if g.hinting == NoHinting { + return + } + // Round the 1st phantom point to the grid, shifting all other points equally. + // Note that "all other points" starts from np0, not np1. + // TODO: delete this adjustment and the np0/np1 distinction, when + // we update the compatibility tests to C Freetype 2.5.3. + // See http://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=05c786d990390a7ca18e62962641dac740bacb06 + if adjust { + pp1x := g.Point[len(g.Point)-4].X + if dx := ((pp1x + 32) &^ 63) - pp1x; dx != 0 { + for i := np0; i < len(g.Point); i++ { + g.Point[i].X += dx + } + } + } + if simple { + g.Unhinted = append(g.Unhinted, g.Point[np1:]...) + } + // Round the 2nd and 4th phantom point to the grid. + p := &g.Point[len(g.Point)-3] + p.X = (p.X + 32) &^ 63 + p = &g.Point[len(g.Point)-1] + p.Y = (p.Y + 32) &^ 63 +} + +// TODO: is this necessary? The zero-valued GlyphBuf is perfectly usable. + +// NewGlyphBuf returns a newly allocated GlyphBuf. +func NewGlyphBuf() *GlyphBuf { + return &GlyphBuf{ + Point: make([]Point, 0, 256), + End: make([]int, 0, 32), + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint.go new file mode 100644 index 000000000..26c631436 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint.go @@ -0,0 +1,1764 @@ +// Copyright 2012 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package truetype + +// This file implements a Truetype bytecode interpreter. 
+// The opcodes are described at https://developer.apple.com/fonts/TTRefMan/RM05/Chap5.html + +import ( + "errors" + "math" +) + +const ( + twilightZone = 0 + glyphZone = 1 + numZone = 2 +) + +type pointType uint32 + +const ( + current pointType = 0 + unhinted pointType = 1 + inFontUnits pointType = 2 + numPointType = 3 +) + +// callStackEntry is a bytecode call stack entry. +type callStackEntry struct { + program []byte + pc int + loopCount int32 +} + +// hinter implements bytecode hinting. A hinter can be re-used to hint a series +// of glyphs from a Font. +type hinter struct { + stack, store []int32 + + // functions is a map from function number to bytecode. + functions map[int32][]byte + + // font and scale are the font and scale last used for this hinter. + // Changing the font will require running the new font's fpgm bytecode. + // Changing either will require running the font's prep bytecode. + font *Font + scale int32 + + // gs and defaultGS are the current and default graphics state. The + // default graphics state is the global default graphics state after + // the font's fpgm and prep programs have been run. + gs, defaultGS graphicsState + + // points and ends are the twilight zone's points, glyph's points + // and glyph's contour boundaries. + points [numZone][numPointType][]Point + ends []int + + // scaledCVT is the lazily initialized scaled Control Value Table. + scaledCVTInitialized bool + scaledCVT []f26dot6 +} + +// graphicsState is described at https://developer.apple.com/fonts/TTRefMan/RM04/Chap4.html +type graphicsState struct { + // Projection vector, freedom vector and dual projection vector. + pv, fv, dv [2]f2dot14 + // Reference points and zone pointers. + rp, zp [3]int32 + // Control Value / Single Width Cut-In. + controlValueCutIn, singleWidthCutIn, singleWidth f26dot6 + // Delta base / shift. + deltaBase, deltaShift int32 + // Minimum distance. + minDist f26dot6 + // Loop count. + loop int32 + // Rounding policy. 
+ roundPeriod, roundPhase, roundThreshold f26dot6 + roundSuper45 bool + // Auto-flip. + autoFlip bool +} + +var globalDefaultGS = graphicsState{ + pv: [2]f2dot14{0x4000, 0}, // Unit vector along the X axis. + fv: [2]f2dot14{0x4000, 0}, + dv: [2]f2dot14{0x4000, 0}, + zp: [3]int32{1, 1, 1}, + controlValueCutIn: (17 << 6) / 16, // 17/16 as an f26dot6. + deltaBase: 9, + deltaShift: 3, + minDist: 1 << 6, // 1 as an f26dot6. + loop: 1, + roundPeriod: 1 << 6, // 1 as an f26dot6. + roundThreshold: 1 << 5, // 1/2 as an f26dot6. + roundSuper45: false, + autoFlip: true, +} + +func resetTwilightPoints(f *Font, p []Point) []Point { + if n := int(f.maxTwilightPoints) + 4; n <= cap(p) { + p = p[:n] + for i := range p { + p[i] = Point{} + } + } else { + p = make([]Point, n) + } + return p +} + +func (h *hinter) init(f *Font, scale int32) error { + h.points[twilightZone][0] = resetTwilightPoints(f, h.points[twilightZone][0]) + h.points[twilightZone][1] = resetTwilightPoints(f, h.points[twilightZone][1]) + h.points[twilightZone][2] = resetTwilightPoints(f, h.points[twilightZone][2]) + + rescale := h.scale != scale + if h.font != f { + h.font, rescale = f, true + if h.functions == nil { + h.functions = make(map[int32][]byte) + } else { + for k := range h.functions { + delete(h.functions, k) + } + } + + if x := int(f.maxStackElements); x > len(h.stack) { + x += 255 + x &^= 255 + h.stack = make([]int32, x) + } + if x := int(f.maxStorage); x > len(h.store) { + x += 15 + x &^= 15 + h.store = make([]int32, x) + } + if len(f.fpgm) != 0 { + if err := h.run(f.fpgm, nil, nil, nil, nil); err != nil { + return err + } + } + } + + if rescale { + h.scale = scale + h.scaledCVTInitialized = false + + h.defaultGS = globalDefaultGS + + if len(f.prep) != 0 { + if err := h.run(f.prep, nil, nil, nil, nil); err != nil { + return err + } + h.defaultGS = h.gs + // The MS rasterizer doesn't allow the following graphics state + // variables to be modified by the CVT program. 
+ h.defaultGS.pv = globalDefaultGS.pv + h.defaultGS.fv = globalDefaultGS.fv + h.defaultGS.dv = globalDefaultGS.dv + h.defaultGS.rp = globalDefaultGS.rp + h.defaultGS.zp = globalDefaultGS.zp + h.defaultGS.loop = globalDefaultGS.loop + } + } + return nil +} + +func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, ends []int) error { + h.gs = h.defaultGS + h.points[glyphZone][current] = pCurrent + h.points[glyphZone][unhinted] = pUnhinted + h.points[glyphZone][inFontUnits] = pInFontUnits + h.ends = ends + + if len(program) > 50000 { + return errors.New("truetype: hinting: too many instructions") + } + var ( + steps, pc, top int + opcode uint8 + + callStack [32]callStackEntry + callStackTop int + ) + + for 0 <= pc && pc < len(program) { + steps++ + if steps == 100000 { + return errors.New("truetype: hinting: too many steps") + } + opcode = program[pc] + if top < int(popCount[opcode]) { + return errors.New("truetype: hinting: stack underflow") + } + switch opcode { + + case opSVTCA0: + h.gs.pv = [2]f2dot14{0, 0x4000} + h.gs.fv = [2]f2dot14{0, 0x4000} + h.gs.dv = [2]f2dot14{0, 0x4000} + + case opSVTCA1: + h.gs.pv = [2]f2dot14{0x4000, 0} + h.gs.fv = [2]f2dot14{0x4000, 0} + h.gs.dv = [2]f2dot14{0x4000, 0} + + case opSPVTCA0: + h.gs.pv = [2]f2dot14{0, 0x4000} + h.gs.dv = [2]f2dot14{0, 0x4000} + + case opSPVTCA1: + h.gs.pv = [2]f2dot14{0x4000, 0} + h.gs.dv = [2]f2dot14{0x4000, 0} + + case opSFVTCA0: + h.gs.fv = [2]f2dot14{0, 0x4000} + + case opSFVTCA1: + h.gs.fv = [2]f2dot14{0x4000, 0} + + case opSPVTL0, opSPVTL1, opSFVTL0, opSFVTL1: + top -= 2 + p1 := h.point(0, current, h.stack[top+0]) + p2 := h.point(0, current, h.stack[top+1]) + if p1 == nil || p2 == nil { + return errors.New("truetype: hinting: point out of range") + } + dx := f2dot14(p1.X - p2.X) + dy := f2dot14(p1.Y - p2.Y) + if dx == 0 && dy == 0 { + dx = 0x4000 + } else if opcode&1 != 0 { + // Counter-clockwise rotation. 
+ dx, dy = -dy, dx + } + v := normalize(dx, dy) + if opcode < opSFVTL0 { + h.gs.pv = v + h.gs.dv = v + } else { + h.gs.fv = v + } + + case opSPVFS: + top -= 2 + h.gs.pv = normalize(f2dot14(h.stack[top]), f2dot14(h.stack[top+1])) + h.gs.dv = h.gs.pv + + case opSFVFS: + top -= 2 + h.gs.fv = normalize(f2dot14(h.stack[top]), f2dot14(h.stack[top+1])) + + case opGPV: + if top+1 >= len(h.stack) { + return errors.New("truetype: hinting: stack overflow") + } + h.stack[top+0] = int32(h.gs.pv[0]) + h.stack[top+1] = int32(h.gs.pv[1]) + top += 2 + + case opGFV: + if top+1 >= len(h.stack) { + return errors.New("truetype: hinting: stack overflow") + } + h.stack[top+0] = int32(h.gs.fv[0]) + h.stack[top+1] = int32(h.gs.fv[1]) + top += 2 + + case opSFVTPV: + h.gs.fv = h.gs.pv + + case opISECT: + top -= 5 + p := h.point(2, current, h.stack[top+0]) + a0 := h.point(1, current, h.stack[top+1]) + a1 := h.point(1, current, h.stack[top+2]) + b0 := h.point(0, current, h.stack[top+3]) + b1 := h.point(0, current, h.stack[top+4]) + if p == nil || a0 == nil || a1 == nil || b0 == nil || b1 == nil { + return errors.New("truetype: hinting: point out of range") + } + + dbx := b1.X - b0.X + dby := b1.Y - b0.Y + dax := a1.X - a0.X + day := a1.Y - a0.Y + dx := b0.X - a0.X + dy := b0.Y - a0.Y + discriminant := mulDiv(int64(dax), int64(-dby), 0x40) + + mulDiv(int64(day), int64(dbx), 0x40) + dotProduct := mulDiv(int64(dax), int64(dbx), 0x40) + + mulDiv(int64(day), int64(dby), 0x40) + // The discriminant above is actually a cross product of vectors + // da and db. Together with the dot product, they can be used as + // surrogates for sine and cosine of the angle between the vectors. + // Indeed, + // dotproduct = |da||db|cos(angle) + // discriminant = |da||db|sin(angle) + // We use these equations to reject grazing intersections by + // thresholding abs(tan(angle)) at 1/19, corresponding to 3 degrees. 
+ absDisc, absDotP := discriminant, dotProduct + if absDisc < 0 { + absDisc = -absDisc + } + if absDotP < 0 { + absDotP = -absDotP + } + if 19*absDisc > absDotP { + val := mulDiv(int64(dx), int64(-dby), 0x40) + + mulDiv(int64(dy), int64(dbx), 0x40) + rx := mulDiv(val, int64(dax), discriminant) + ry := mulDiv(val, int64(day), discriminant) + p.X = a0.X + int32(rx) + p.Y = a0.Y + int32(ry) + } else { + p.X = (a0.X + a1.X + b0.X + b1.X) / 4 + p.Y = (a0.Y + a1.Y + b0.Y + b1.Y) / 4 + } + p.Flags |= flagTouchedX | flagTouchedY + + case opSRP0, opSRP1, opSRP2: + top-- + h.gs.rp[opcode-opSRP0] = h.stack[top] + + case opSZP0, opSZP1, opSZP2: + top-- + h.gs.zp[opcode-opSZP0] = h.stack[top] + + case opSZPS: + top-- + h.gs.zp[0] = h.stack[top] + h.gs.zp[1] = h.stack[top] + h.gs.zp[2] = h.stack[top] + + case opSLOOP: + top-- + if h.stack[top] <= 0 { + return errors.New("truetype: hinting: invalid data") + } + h.gs.loop = h.stack[top] + + case opRTG: + h.gs.roundPeriod = 1 << 6 + h.gs.roundPhase = 0 + h.gs.roundThreshold = 1 << 5 + h.gs.roundSuper45 = false + + case opRTHG: + h.gs.roundPeriod = 1 << 6 + h.gs.roundPhase = 1 << 5 + h.gs.roundThreshold = 1 << 5 + h.gs.roundSuper45 = false + + case opSMD: + top-- + h.gs.minDist = f26dot6(h.stack[top]) + + case opELSE: + opcode = 1 + goto ifelse + + case opJMPR: + top-- + pc += int(h.stack[top]) + continue + + case opSCVTCI: + top-- + h.gs.controlValueCutIn = f26dot6(h.stack[top]) + + case opSSWCI: + top-- + h.gs.singleWidthCutIn = f26dot6(h.stack[top]) + + case opSSW: + top-- + h.gs.singleWidth = f26dot6(h.font.scale(h.scale * h.stack[top])) + + case opDUP: + if top >= len(h.stack) { + return errors.New("truetype: hinting: stack overflow") + } + h.stack[top] = h.stack[top-1] + top++ + + case opPOP: + top-- + + case opCLEAR: + top = 0 + + case opSWAP: + h.stack[top-1], h.stack[top-2] = h.stack[top-2], h.stack[top-1] + + case opDEPTH: + if top >= len(h.stack) { + return errors.New("truetype: hinting: stack overflow") + } + 
h.stack[top] = int32(top) + top++ + + case opCINDEX, opMINDEX: + x := int(h.stack[top-1]) + if x <= 0 || x >= top { + return errors.New("truetype: hinting: invalid data") + } + h.stack[top-1] = h.stack[top-1-x] + if opcode == opMINDEX { + copy(h.stack[top-1-x:top-1], h.stack[top-x:top]) + top-- + } + + case opALIGNPTS: + top -= 2 + p := h.point(1, current, h.stack[top]) + q := h.point(0, current, h.stack[top+1]) + if p == nil || q == nil { + return errors.New("truetype: hinting: point out of range") + } + d := dotProduct(f26dot6(q.X-p.X), f26dot6(q.Y-p.Y), h.gs.pv) / 2 + h.move(p, +d, true) + h.move(q, -d, true) + + case opUTP: + top-- + p := h.point(0, current, h.stack[top]) + if p == nil { + return errors.New("truetype: hinting: point out of range") + } + p.Flags &^= flagTouchedX | flagTouchedY + + case opLOOPCALL, opCALL: + if callStackTop >= len(callStack) { + return errors.New("truetype: hinting: call stack overflow") + } + top-- + f, ok := h.functions[h.stack[top]] + if !ok { + return errors.New("truetype: hinting: undefined function") + } + callStack[callStackTop] = callStackEntry{program, pc, 1} + if opcode == opLOOPCALL { + top-- + if h.stack[top] == 0 { + break + } + callStack[callStackTop].loopCount = h.stack[top] + } + callStackTop++ + program, pc = f, 0 + continue + + case opFDEF: + // Save all bytecode up until the next ENDF. 
+ startPC := pc + 1 + fdefloop: + for { + pc++ + if pc >= len(program) { + return errors.New("truetype: hinting: unbalanced FDEF") + } + switch program[pc] { + case opFDEF: + return errors.New("truetype: hinting: nested FDEF") + case opENDF: + top-- + h.functions[h.stack[top]] = program[startPC : pc+1] + break fdefloop + default: + var ok bool + pc, ok = skipInstructionPayload(program, pc) + if !ok { + return errors.New("truetype: hinting: unbalanced FDEF") + } + } + } + + case opENDF: + if callStackTop == 0 { + return errors.New("truetype: hinting: call stack underflow") + } + callStackTop-- + callStack[callStackTop].loopCount-- + if callStack[callStackTop].loopCount != 0 { + callStackTop++ + pc = 0 + continue + } + program, pc = callStack[callStackTop].program, callStack[callStackTop].pc + + case opMDAP0, opMDAP1: + top-- + i := h.stack[top] + p := h.point(0, current, i) + if p == nil { + return errors.New("truetype: hinting: point out of range") + } + distance := f26dot6(0) + if opcode == opMDAP1 { + distance = dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv) + // TODO: metrics compensation. 
+ distance = h.round(distance) - distance + } + h.move(p, distance, true) + h.gs.rp[0] = i + h.gs.rp[1] = i + + case opIUP0, opIUP1: + iupY, mask := opcode == opIUP0, uint32(flagTouchedX) + if iupY { + mask = flagTouchedY + } + prevEnd := 0 + for _, end := range h.ends { + for i := prevEnd; i < end; i++ { + for i < end && h.points[glyphZone][current][i].Flags&mask == 0 { + i++ + } + if i == end { + break + } + firstTouched, curTouched := i, i + i++ + for ; i < end; i++ { + if h.points[glyphZone][current][i].Flags&mask != 0 { + h.iupInterp(iupY, curTouched+1, i-1, curTouched, i) + curTouched = i + } + } + if curTouched == firstTouched { + h.iupShift(iupY, prevEnd, end, curTouched) + } else { + h.iupInterp(iupY, curTouched+1, end-1, curTouched, firstTouched) + if firstTouched > 0 { + h.iupInterp(iupY, prevEnd, firstTouched-1, curTouched, firstTouched) + } + } + } + prevEnd = end + } + + case opSHP0, opSHP1: + if top < int(h.gs.loop) { + return errors.New("truetype: hinting: stack underflow") + } + _, _, d, ok := h.displacement(opcode&1 == 0) + if !ok { + return errors.New("truetype: hinting: point out of range") + } + for ; h.gs.loop != 0; h.gs.loop-- { + top-- + p := h.point(2, current, h.stack[top]) + if p == nil { + return errors.New("truetype: hinting: point out of range") + } + h.move(p, d, true) + } + h.gs.loop = 1 + + case opSHC0, opSHC1: + top-- + zonePointer, i, d, ok := h.displacement(opcode&1 == 0) + if !ok { + return errors.New("truetype: hinting: point out of range") + } + if h.gs.zp[2] == 0 { + // TODO: implement this when we have a glyph that does this. 
+ return errors.New("hinting: unimplemented SHC instruction") + } + contour := h.stack[top] + if contour < 0 || len(ends) <= int(contour) { + return errors.New("truetype: hinting: contour out of range") + } + j0, j1 := int32(0), int32(h.ends[contour]) + if contour > 0 { + j0 = int32(h.ends[contour-1]) + } + move := h.gs.zp[zonePointer] != h.gs.zp[2] + for j := j0; j < j1; j++ { + if move || j != i { + h.move(h.point(2, current, j), d, true) + } + } + + case opSHZ0, opSHZ1: + top-- + zonePointer, i, d, ok := h.displacement(opcode&1 == 0) + if !ok { + return errors.New("truetype: hinting: point out of range") + } + + // As per C Freetype, SHZ doesn't move the phantom points, or mark + // the points as touched. + limit := int32(len(h.points[h.gs.zp[2]][current])) + if h.gs.zp[2] == glyphZone { + limit -= 4 + } + for j := int32(0); j < limit; j++ { + if i != j || h.gs.zp[zonePointer] != h.gs.zp[2] { + h.move(h.point(2, current, j), d, false) + } + } + + case opSHPIX: + top-- + d := f26dot6(h.stack[top]) + if top < int(h.gs.loop) { + return errors.New("truetype: hinting: stack underflow") + } + for ; h.gs.loop != 0; h.gs.loop-- { + top-- + p := h.point(2, current, h.stack[top]) + if p == nil { + return errors.New("truetype: hinting: point out of range") + } + h.move(p, d, true) + } + h.gs.loop = 1 + + case opIP: + if top < int(h.gs.loop) { + return errors.New("truetype: hinting: stack underflow") + } + pointType := inFontUnits + twilight := h.gs.zp[0] == 0 || h.gs.zp[1] == 0 || h.gs.zp[2] == 0 + if twilight { + pointType = unhinted + } + p := h.point(1, pointType, h.gs.rp[2]) + oldP := h.point(0, pointType, h.gs.rp[1]) + oldRange := dotProduct(f26dot6(p.X-oldP.X), f26dot6(p.Y-oldP.Y), h.gs.dv) + + p = h.point(1, current, h.gs.rp[2]) + curP := h.point(0, current, h.gs.rp[1]) + curRange := dotProduct(f26dot6(p.X-curP.X), f26dot6(p.Y-curP.Y), h.gs.pv) + for ; h.gs.loop != 0; h.gs.loop-- { + top-- + i := h.stack[top] + p = h.point(2, pointType, i) + oldDist := 
dotProduct(f26dot6(p.X-oldP.X), f26dot6(p.Y-oldP.Y), h.gs.dv) + p = h.point(2, current, i) + curDist := dotProduct(f26dot6(p.X-curP.X), f26dot6(p.Y-curP.Y), h.gs.pv) + newDist := f26dot6(0) + if oldDist != 0 { + if oldRange != 0 { + newDist = f26dot6(mulDiv(int64(oldDist), int64(curRange), int64(oldRange))) + } else { + newDist = -oldDist + } + } + h.move(p, newDist-curDist, true) + } + h.gs.loop = 1 + + case opMSIRP0, opMSIRP1: + top -= 2 + i := h.stack[top] + distance := f26dot6(h.stack[top+1]) + + // TODO: special case h.gs.zp[1] == 0 in C Freetype. + ref := h.point(0, current, h.gs.rp[0]) + p := h.point(1, current, i) + if ref == nil || p == nil { + return errors.New("truetype: hinting: point out of range") + } + curDist := dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv) + + // Set-RP0 bit. + if opcode == opMSIRP1 { + h.gs.rp[0] = i + } + h.gs.rp[1] = h.gs.rp[0] + h.gs.rp[2] = i + + // Move the point. + h.move(p, distance-curDist, true) + + case opALIGNRP: + if top < int(h.gs.loop) { + return errors.New("truetype: hinting: stack underflow") + } + ref := h.point(0, current, h.gs.rp[0]) + if ref == nil { + return errors.New("truetype: hinting: point out of range") + } + for ; h.gs.loop != 0; h.gs.loop-- { + top-- + p := h.point(1, current, h.stack[top]) + if p == nil { + return errors.New("truetype: hinting: point out of range") + } + h.move(p, -dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv), true) + } + h.gs.loop = 1 + + case opRTDG: + h.gs.roundPeriod = 1 << 5 + h.gs.roundPhase = 0 + h.gs.roundThreshold = 1 << 4 + h.gs.roundSuper45 = false + + case opMIAP0, opMIAP1: + top -= 2 + i := h.stack[top] + distance := h.getScaledCVT(h.stack[top+1]) + if h.gs.zp[0] == 0 { + p := h.point(0, unhinted, i) + q := h.point(0, current, i) + p.X = int32((int64(distance) * int64(h.gs.fv[0])) >> 14) + p.Y = int32((int64(distance) * int64(h.gs.fv[1])) >> 14) + *q = *p + } + p := h.point(0, current, i) + oldDist := dotProduct(f26dot6(p.X), f26dot6(p.Y), 
h.gs.pv) + if opcode == opMIAP1 { + if (distance - oldDist).abs() > h.gs.controlValueCutIn { + distance = oldDist + } + // TODO: metrics compensation. + distance = h.round(distance) + } + h.move(p, distance-oldDist, true) + h.gs.rp[0] = i + h.gs.rp[1] = i + + case opNPUSHB: + opcode = 0 + goto push + + case opNPUSHW: + opcode = 0x80 + goto push + + case opWS: + top -= 2 + i := int(h.stack[top]) + if i < 0 || len(h.store) <= i { + return errors.New("truetype: hinting: invalid data") + } + h.store[i] = h.stack[top+1] + + case opRS: + i := int(h.stack[top-1]) + if i < 0 || len(h.store) <= i { + return errors.New("truetype: hinting: invalid data") + } + h.stack[top-1] = h.store[i] + + case opWCVTP: + top -= 2 + h.setScaledCVT(h.stack[top], f26dot6(h.stack[top+1])) + + case opRCVT: + h.stack[top-1] = int32(h.getScaledCVT(h.stack[top-1])) + + case opGC0, opGC1: + i := h.stack[top-1] + if opcode == opGC0 { + p := h.point(2, current, i) + h.stack[top-1] = int32(dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv)) + } else { + p := h.point(2, unhinted, i) + // Using dv as per C Freetype. 
+ h.stack[top-1] = int32(dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.dv)) + } + + case opSCFS: + top -= 2 + i := h.stack[top] + p := h.point(2, current, i) + if p == nil { + return errors.New("truetype: hinting: point out of range") + } + c := dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv) + h.move(p, f26dot6(h.stack[top+1])-c, true) + if h.gs.zp[2] != 0 { + break + } + q := h.point(2, unhinted, i) + if q == nil { + return errors.New("truetype: hinting: point out of range") + } + q.X = p.X + q.Y = p.Y + + case opMD0, opMD1: + top-- + pt, v, scale := pointType(0), [2]f2dot14{}, false + if opcode == opMD0 { + pt = current + v = h.gs.pv + } else if h.gs.zp[0] == 0 || h.gs.zp[1] == 0 { + pt = unhinted + v = h.gs.dv + } else { + pt = inFontUnits + v = h.gs.dv + scale = true + } + p := h.point(0, pt, h.stack[top-1]) + q := h.point(1, pt, h.stack[top]) + if p == nil || q == nil { + return errors.New("truetype: hinting: point out of range") + } + d := int32(dotProduct(f26dot6(p.X-q.X), f26dot6(p.Y-q.Y), v)) + if scale { + d = int32(int64(d*h.scale) / int64(h.font.fUnitsPerEm)) + } + h.stack[top-1] = d + + case opMPPEM, opMPS: + if top >= len(h.stack) { + return errors.New("truetype: hinting: stack overflow") + } + // For MPS, point size should be irrelevant; we return the PPEM. + h.stack[top] = h.scale >> 6 + top++ + + case opFLIPON, opFLIPOFF: + h.gs.autoFlip = opcode == opFLIPON + + case opDEBUG: + // No-op. 
+ + case opLT: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] < h.stack[top]) + + case opLTEQ: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] <= h.stack[top]) + + case opGT: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] > h.stack[top]) + + case opGTEQ: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] >= h.stack[top]) + + case opEQ: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] == h.stack[top]) + + case opNEQ: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] != h.stack[top]) + + case opODD, opEVEN: + i := h.round(f26dot6(h.stack[top-1])) >> 6 + h.stack[top-1] = int32(i&1) ^ int32(opcode-opODD) + + case opIF: + top-- + if h.stack[top] == 0 { + opcode = 0 + goto ifelse + } + + case opEIF: + // No-op. + + case opAND: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1] != 0 && h.stack[top] != 0) + + case opOR: + top-- + h.stack[top-1] = bool2int32(h.stack[top-1]|h.stack[top] != 0) + + case opNOT: + h.stack[top-1] = bool2int32(h.stack[top-1] == 0) + + case opDELTAP1: + goto delta + + case opSDB: + top-- + h.gs.deltaBase = h.stack[top] + + case opSDS: + top-- + h.gs.deltaShift = h.stack[top] + + case opADD: + top-- + h.stack[top-1] += h.stack[top] + + case opSUB: + top-- + h.stack[top-1] -= h.stack[top] + + case opDIV: + top-- + if h.stack[top] == 0 { + return errors.New("truetype: hinting: division by zero") + } + h.stack[top-1] = int32(f26dot6(h.stack[top-1]).div(f26dot6(h.stack[top]))) + + case opMUL: + top-- + h.stack[top-1] = int32(f26dot6(h.stack[top-1]).mul(f26dot6(h.stack[top]))) + + case opABS: + if h.stack[top-1] < 0 { + h.stack[top-1] = -h.stack[top-1] + } + + case opNEG: + h.stack[top-1] = -h.stack[top-1] + + case opFLOOR: + h.stack[top-1] &^= 63 + + case opCEILING: + h.stack[top-1] += 63 + h.stack[top-1] &^= 63 + + case opROUND00, opROUND01, opROUND10, opROUND11: + // The four flavors of opROUND are equivalent. See the comment below on + // opNROUND for the rationale. 
+ h.stack[top-1] = int32(h.round(f26dot6(h.stack[top-1]))) + + case opNROUND00, opNROUND01, opNROUND10, opNROUND11: + // No-op. The spec says to add one of four "compensations for the engine + // characteristics", to cater for things like "different dot-size printers". + // https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#engine_compensation + // This code does not implement engine compensation, as we don't expect to + // be used to output on dot-matrix printers. + + case opWCVTF: + top -= 2 + h.setScaledCVT(h.stack[top], f26dot6(h.font.scale(h.scale*h.stack[top+1]))) + + case opDELTAP2, opDELTAP3, opDELTAC1, opDELTAC2, opDELTAC3: + goto delta + + case opSROUND, opS45ROUND: + top-- + switch (h.stack[top] >> 6) & 0x03 { + case 0: + h.gs.roundPeriod = 1 << 5 + case 1, 3: + h.gs.roundPeriod = 1 << 6 + case 2: + h.gs.roundPeriod = 1 << 7 + } + h.gs.roundSuper45 = opcode == opS45ROUND + if h.gs.roundSuper45 { + // The spec says to multiply by √2, but the C Freetype code says 1/√2. + // We go with 1/√2. + h.gs.roundPeriod *= 46341 + h.gs.roundPeriod /= 65536 + } + h.gs.roundPhase = h.gs.roundPeriod * f26dot6((h.stack[top]>>4)&0x03) / 4 + if x := h.stack[top] & 0x0f; x != 0 { + h.gs.roundThreshold = h.gs.roundPeriod * f26dot6(x-4) / 8 + } else { + h.gs.roundThreshold = h.gs.roundPeriod - 1 + } + + case opJROT: + top -= 2 + if h.stack[top+1] != 0 { + pc += int(h.stack[top]) + continue + } + + case opJROF: + top -= 2 + if h.stack[top+1] == 0 { + pc += int(h.stack[top]) + continue + } + + case opROFF: + h.gs.roundPeriod = 0 + h.gs.roundPhase = 0 + h.gs.roundThreshold = 0 + h.gs.roundSuper45 = false + + case opRUTG: + h.gs.roundPeriod = 1 << 6 + h.gs.roundPhase = 0 + h.gs.roundThreshold = 1<<6 - 1 + h.gs.roundSuper45 = false + + case opRDTG: + h.gs.roundPeriod = 1 << 6 + h.gs.roundPhase = 0 + h.gs.roundThreshold = 0 + h.gs.roundSuper45 = false + + case opSANGW, opAA: + // These ops are "anachronistic" and no longer used. 
+ top-- + + case opFLIPPT: + if top < int(h.gs.loop) { + return errors.New("truetype: hinting: stack underflow") + } + points := h.points[glyphZone][current] + for ; h.gs.loop != 0; h.gs.loop-- { + top-- + i := h.stack[top] + if i < 0 || len(points) <= int(i) { + return errors.New("truetype: hinting: point out of range") + } + points[i].Flags ^= flagOnCurve + } + h.gs.loop = 1 + + case opFLIPRGON, opFLIPRGOFF: + top -= 2 + i, j, points := h.stack[top], h.stack[top+1], h.points[glyphZone][current] + if i < 0 || len(points) <= int(i) || j < 0 || len(points) <= int(j) { + return errors.New("truetype: hinting: point out of range") + } + for ; i <= j; i++ { + if opcode == opFLIPRGON { + points[i].Flags |= flagOnCurve + } else { + points[i].Flags &^= flagOnCurve + } + } + + case opSCANCTRL: + // We do not support dropout control, as we always rasterize grayscale glyphs. + top-- + + case opSDPVTL0, opSDPVTL1: + top -= 2 + for i := 0; i < 2; i++ { + pt := unhinted + if i != 0 { + pt = current + } + p := h.point(1, pt, h.stack[top]) + q := h.point(2, pt, h.stack[top+1]) + if p == nil || q == nil { + return errors.New("truetype: hinting: point out of range") + } + dx := f2dot14(p.X - q.X) + dy := f2dot14(p.Y - q.Y) + if dx == 0 && dy == 0 { + dx = 0x4000 + } else if opcode&1 != 0 { + // Counter-clockwise rotation. + dx, dy = -dy, dx + } + if i == 0 { + h.gs.dv = normalize(dx, dy) + } else { + h.gs.pv = normalize(dx, dy) + } + } + + case opGETINFO: + res := int32(0) + if h.stack[top-1]&(1<<0) != 0 { + // Set the engine version. We hard-code this to 35, the same as + // the C freetype code, which says that "Version~35 corresponds + // to MS rasterizer v.1.7 as used e.g. in Windows~98". + res |= 35 + } + if h.stack[top-1]&(1<<5) != 0 { + // Set that we support grayscale. + res |= 1 << 12 + } + // We set no other bits, as we do not support rotated or stretched glyphs. 
+ h.stack[top-1] = res + + case opIDEF: + // IDEF is for ancient versions of the bytecode interpreter, and is no longer used. + return errors.New("truetype: hinting: unsupported IDEF instruction") + + case opROLL: + h.stack[top-1], h.stack[top-3], h.stack[top-2] = + h.stack[top-3], h.stack[top-2], h.stack[top-1] + + case opMAX: + top-- + if h.stack[top-1] < h.stack[top] { + h.stack[top-1] = h.stack[top] + } + + case opMIN: + top-- + if h.stack[top-1] > h.stack[top] { + h.stack[top-1] = h.stack[top] + } + + case opSCANTYPE: + // We do not support dropout control, as we always rasterize grayscale glyphs. + top-- + + case opINSTCTRL: + // TODO: support instruction execution control? It seems rare, and even when + // nominally used (e.g. Source Sans Pro), it seems conditional on extreme or + // unusual rasterization conditions. For example, the code snippet at + // https://developer.apple.com/fonts/TTRefMan/RM05/Chap5.html#INSTCTRL + // uses INSTCTRL when grid-fitting a rotated or stretched glyph, but + // freetype-go does not support rotated or stretched glyphs. + top -= 2 + + default: + if opcode < opPUSHB000 { + return errors.New("truetype: hinting: unrecognized instruction") + } + + if opcode < opMDRP00000 { + // PUSHxxxx opcode. + + if opcode < opPUSHW000 { + opcode -= opPUSHB000 - 1 + } else { + opcode -= opPUSHW000 - 1 - 0x80 + } + goto push + } + + if opcode < opMIRP00000 { + // MDRPxxxxx opcode. 
+ + top-- + i := h.stack[top] + ref := h.point(0, current, h.gs.rp[0]) + p := h.point(1, current, i) + if ref == nil || p == nil { + return errors.New("truetype: hinting: point out of range") + } + + oldDist := f26dot6(0) + if h.gs.zp[0] == 0 || h.gs.zp[1] == 0 { + p0 := h.point(1, unhinted, i) + p1 := h.point(0, unhinted, h.gs.rp[0]) + oldDist = dotProduct(f26dot6(p0.X-p1.X), f26dot6(p0.Y-p1.Y), h.gs.dv) + } else { + p0 := h.point(1, inFontUnits, i) + p1 := h.point(0, inFontUnits, h.gs.rp[0]) + oldDist = dotProduct(f26dot6(p0.X-p1.X), f26dot6(p0.Y-p1.Y), h.gs.dv) + oldDist = f26dot6(h.font.scale(h.scale * int32(oldDist))) + } + + // Single-width cut-in test. + if x := (oldDist - h.gs.singleWidth).abs(); x < h.gs.singleWidthCutIn { + if oldDist >= 0 { + oldDist = +h.gs.singleWidth + } else { + oldDist = -h.gs.singleWidth + } + } + + // Rounding bit. + // TODO: metrics compensation. + distance := oldDist + if opcode&0x04 != 0 { + distance = h.round(oldDist) + } + + // Minimum distance bit. + if opcode&0x08 != 0 { + if oldDist >= 0 { + if distance < h.gs.minDist { + distance = h.gs.minDist + } + } else { + if distance > -h.gs.minDist { + distance = -h.gs.minDist + } + } + } + + // Set-RP0 bit. + h.gs.rp[1] = h.gs.rp[0] + h.gs.rp[2] = i + if opcode&0x10 != 0 { + h.gs.rp[0] = i + } + + // Move the point. + oldDist = dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv) + h.move(p, distance-oldDist, true) + + } else { + // MIRPxxxxx opcode. + + top -= 2 + i := h.stack[top] + cvtDist := h.getScaledCVT(h.stack[top+1]) + if (cvtDist - h.gs.singleWidth).abs() < h.gs.singleWidthCutIn { + if cvtDist >= 0 { + cvtDist = +h.gs.singleWidth + } else { + cvtDist = -h.gs.singleWidth + } + } + + if h.gs.zp[1] == 0 { + // TODO: implement once we have a .ttf file that triggers + // this, so that we can step through C's freetype. 
+ return errors.New("truetype: hinting: unimplemented twilight point adjustment") + } + + ref := h.point(0, unhinted, h.gs.rp[0]) + p := h.point(1, unhinted, i) + if ref == nil || p == nil { + return errors.New("truetype: hinting: point out of range") + } + oldDist := dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.dv) + + ref = h.point(0, current, h.gs.rp[0]) + p = h.point(1, current, i) + if ref == nil || p == nil { + return errors.New("truetype: hinting: point out of range") + } + curDist := dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv) + + if h.gs.autoFlip && oldDist^cvtDist < 0 { + cvtDist = -cvtDist + } + + // Rounding bit. + // TODO: metrics compensation. + distance := cvtDist + if opcode&0x04 != 0 { + // The CVT value is only used if close enough to oldDist. + if (h.gs.zp[0] == h.gs.zp[1]) && + ((cvtDist - oldDist).abs() > h.gs.controlValueCutIn) { + + distance = oldDist + } + distance = h.round(distance) + } + + // Minimum distance bit. + if opcode&0x08 != 0 { + if oldDist >= 0 { + if distance < h.gs.minDist { + distance = h.gs.minDist + } + } else { + if distance > -h.gs.minDist { + distance = -h.gs.minDist + } + } + } + + // Set-RP0 bit. + h.gs.rp[1] = h.gs.rp[0] + h.gs.rp[2] = i + if opcode&0x10 != 0 { + h.gs.rp[0] = i + } + + // Move the point. + h.move(p, distance-curDist, true) + } + } + pc++ + continue + + ifelse: + // Skip past bytecode until the next ELSE (if opcode == 0) or the + // next EIF (for all opcodes). Opcode == 0 means that we have come + // from an IF. Opcode == 1 means that we have come from an ELSE. 
+ { + ifelseloop: + for depth := 0; ; { + pc++ + if pc >= len(program) { + return errors.New("truetype: hinting: unbalanced IF or ELSE") + } + switch program[pc] { + case opIF: + depth++ + case opELSE: + if depth == 0 && opcode == 0 { + break ifelseloop + } + case opEIF: + depth-- + if depth < 0 { + break ifelseloop + } + default: + var ok bool + pc, ok = skipInstructionPayload(program, pc) + if !ok { + return errors.New("truetype: hinting: unbalanced IF or ELSE") + } + } + } + pc++ + continue + } + + push: + // Push n elements from the program to the stack, where n is the low 7 bits of + // opcode. If the low 7 bits are zero, then n is the next byte from the program. + // The high bit being 0 means that the elements are zero-extended bytes. + // The high bit being 1 means that the elements are sign-extended words. + { + width := 1 + if opcode&0x80 != 0 { + opcode &^= 0x80 + width = 2 + } + if opcode == 0 { + pc++ + if pc >= len(program) { + return errors.New("truetype: hinting: insufficient data") + } + opcode = program[pc] + } + pc++ + if top+int(opcode) > len(h.stack) { + return errors.New("truetype: hinting: stack overflow") + } + if pc+width*int(opcode) > len(program) { + return errors.New("truetype: hinting: insufficient data") + } + for ; opcode > 0; opcode-- { + if width == 1 { + h.stack[top] = int32(program[pc]) + } else { + h.stack[top] = int32(int8(program[pc]))<<8 | int32(program[pc+1]) + } + top++ + pc += width + } + continue + } + + delta: + { + if opcode >= opDELTAC1 && !h.scaledCVTInitialized { + h.initializeScaledCVT() + } + top-- + n := h.stack[top] + if int32(top) < 2*n { + return errors.New("truetype: hinting: stack underflow") + } + for ; n > 0; n-- { + top -= 2 + b := h.stack[top] + c := (b & 0xf0) >> 4 + switch opcode { + case opDELTAP2, opDELTAC2: + c += 16 + case opDELTAP3, opDELTAC3: + c += 32 + } + c += h.gs.deltaBase + if ppem := (h.scale + 1<<5) >> 6; ppem != c { + continue + } + b = (b & 0x0f) - 8 + if b >= 0 { + b++ + } + b = b * 64 / 
(1 << uint32(h.gs.deltaShift))
			if opcode >= opDELTAC1 {
				// DELTAC*: nudge a scaled CVT entry by the exception amount b.
				a := h.stack[top+1]
				if a < 0 || len(h.scaledCVT) <= int(a) {
					return errors.New("truetype: hinting: index out of range")
				}
				h.scaledCVT[a] += f26dot6(b)
			} else {
				// DELTAP*: move the addressed point by the exception amount b.
				p := h.point(0, current, h.stack[top+1])
				if p == nil {
					return errors.New("truetype: hinting: point out of range")
				}
				h.move(p, f26dot6(b), true)
			}
		}
		pc++
		continue
	}
	}
	return nil
}

// initializeScaledCVT fills h.scaledCVT with the font's Control Value Table
// entries (big-endian int16 pairs in h.font.cvt) scaled by h.scale, reusing
// any existing backing array when it is large enough.
func (h *hinter) initializeScaledCVT() {
	h.scaledCVTInitialized = true
	n := len(h.font.cvt) / 2
	if n <= cap(h.scaledCVT) {
		h.scaledCVT = h.scaledCVT[:n]
	} else {
		// Allocate with some headroom; 32 is the minimum capacity.
		capacity := n
		if capacity < 32 {
			capacity = 32
		}
		h.scaledCVT = make([]f26dot6, n, capacity)
	}
	for i := range h.scaledCVT {
		raw := uint16(h.font.cvt[2*i])<<8 | uint16(h.font.cvt[2*i+1])
		h.scaledCVT[i] = f26dot6(h.font.scale(h.scale * int32(int16(raw))))
	}
}

// getScaledCVT returns the scaled value from the font's Control Value Table.
// Out-of-range indices yield 0 rather than an error.
func (h *hinter) getScaledCVT(i int32) f26dot6 {
	if !h.scaledCVTInitialized {
		h.initializeScaledCVT()
	}
	if i < 0 || len(h.scaledCVT) <= int(i) {
		return 0
	}
	return h.scaledCVT[i]
}

// setScaledCVT overrides the scaled value from the font's Control Value Table.
+func (h *hinter) setScaledCVT(i int32, v f26dot6) { + if !h.scaledCVTInitialized { + h.initializeScaledCVT() + } + if i < 0 || len(h.scaledCVT) <= int(i) { + return + } + h.scaledCVT[i] = v +} + +func (h *hinter) point(zonePointer uint32, pt pointType, i int32) *Point { + points := h.points[h.gs.zp[zonePointer]][pt] + if i < 0 || len(points) <= int(i) { + return nil + } + return &points[i] +} + +func (h *hinter) move(p *Point, distance f26dot6, touch bool) { + fvx := int64(h.gs.fv[0]) + pvx := int64(h.gs.pv[0]) + if fvx == 0x4000 && pvx == 0x4000 { + p.X += int32(distance) + if touch { + p.Flags |= flagTouchedX + } + return + } + + fvy := int64(h.gs.fv[1]) + pvy := int64(h.gs.pv[1]) + if fvy == 0x4000 && pvy == 0x4000 { + p.Y += int32(distance) + if touch { + p.Flags |= flagTouchedY + } + return + } + + fvDotPv := (fvx*pvx + fvy*pvy) >> 14 + + if fvx != 0 { + p.X += int32(mulDiv(fvx, int64(distance), fvDotPv)) + if touch { + p.Flags |= flagTouchedX + } + } + + if fvy != 0 { + p.Y += int32(mulDiv(fvy, int64(distance), fvDotPv)) + if touch { + p.Flags |= flagTouchedY + } + } +} + +func (h *hinter) iupInterp(interpY bool, p1, p2, ref1, ref2 int) { + if p1 > p2 { + return + } + if ref1 >= len(h.points[glyphZone][current]) || + ref2 >= len(h.points[glyphZone][current]) { + return + } + + var ifu1, ifu2 int32 + if interpY { + ifu1 = h.points[glyphZone][inFontUnits][ref1].Y + ifu2 = h.points[glyphZone][inFontUnits][ref2].Y + } else { + ifu1 = h.points[glyphZone][inFontUnits][ref1].X + ifu2 = h.points[glyphZone][inFontUnits][ref2].X + } + if ifu1 > ifu2 { + ifu1, ifu2 = ifu2, ifu1 + ref1, ref2 = ref2, ref1 + } + + var unh1, unh2, delta1, delta2 int32 + if interpY { + unh1 = h.points[glyphZone][unhinted][ref1].Y + unh2 = h.points[glyphZone][unhinted][ref2].Y + delta1 = h.points[glyphZone][current][ref1].Y - unh1 + delta2 = h.points[glyphZone][current][ref2].Y - unh2 + } else { + unh1 = h.points[glyphZone][unhinted][ref1].X + unh2 = h.points[glyphZone][unhinted][ref2].X + 
delta1 = h.points[glyphZone][current][ref1].X - unh1 + delta2 = h.points[glyphZone][current][ref2].X - unh2 + } + + var xy, ifuXY int32 + if ifu1 == ifu2 { + for i := p1; i <= p2; i++ { + if interpY { + xy = h.points[glyphZone][unhinted][i].Y + } else { + xy = h.points[glyphZone][unhinted][i].X + } + + if xy <= unh1 { + xy += delta1 + } else { + xy += delta2 + } + + if interpY { + h.points[glyphZone][current][i].Y = xy + } else { + h.points[glyphZone][current][i].X = xy + } + } + return + } + + scale, scaleOK := int64(0), false + for i := p1; i <= p2; i++ { + if interpY { + xy = h.points[glyphZone][unhinted][i].Y + ifuXY = h.points[glyphZone][inFontUnits][i].Y + } else { + xy = h.points[glyphZone][unhinted][i].X + ifuXY = h.points[glyphZone][inFontUnits][i].X + } + + if xy <= unh1 { + xy += delta1 + } else if xy >= unh2 { + xy += delta2 + } else { + if !scaleOK { + scaleOK = true + scale = mulDiv(int64(unh2+delta2-unh1-delta1), 0x10000, int64(ifu2-ifu1)) + } + numer := int64(ifuXY-ifu1) * scale + if numer >= 0 { + numer += 0x8000 + } else { + numer -= 0x8000 + } + xy = unh1 + delta1 + int32(numer/0x10000) + } + + if interpY { + h.points[glyphZone][current][i].Y = xy + } else { + h.points[glyphZone][current][i].X = xy + } + } +} + +func (h *hinter) iupShift(interpY bool, p1, p2, p int) { + var delta int32 + if interpY { + delta = h.points[glyphZone][current][p].Y - h.points[glyphZone][unhinted][p].Y + } else { + delta = h.points[glyphZone][current][p].X - h.points[glyphZone][unhinted][p].X + } + if delta == 0 { + return + } + for i := p1; i < p2; i++ { + if i == p { + continue + } + if interpY { + h.points[glyphZone][current][i].Y += delta + } else { + h.points[glyphZone][current][i].X += delta + } + } +} + +func (h *hinter) displacement(useZP1 bool) (zonePointer uint32, i int32, d f26dot6, ok bool) { + zonePointer, i = uint32(0), h.gs.rp[1] + if useZP1 { + zonePointer, i = 1, h.gs.rp[2] + } + p := h.point(zonePointer, current, i) + q := h.point(zonePointer, 
unhinted, i) + if p == nil || q == nil { + return 0, 0, 0, false + } + d = dotProduct(f26dot6(p.X-q.X), f26dot6(p.Y-q.Y), h.gs.pv) + return zonePointer, i, d, true +} + +// skipInstructionPayload increments pc by the extra data that follows a +// variable length PUSHB or PUSHW instruction. +func skipInstructionPayload(program []byte, pc int) (newPC int, ok bool) { + switch program[pc] { + case opNPUSHB: + pc++ + if pc >= len(program) { + return 0, false + } + pc += int(program[pc]) + case opNPUSHW: + pc++ + if pc >= len(program) { + return 0, false + } + pc += 2 * int(program[pc]) + case opPUSHB000, opPUSHB001, opPUSHB010, opPUSHB011, + opPUSHB100, opPUSHB101, opPUSHB110, opPUSHB111: + pc += int(program[pc] - (opPUSHB000 - 1)) + case opPUSHW000, opPUSHW001, opPUSHW010, opPUSHW011, + opPUSHW100, opPUSHW101, opPUSHW110, opPUSHW111: + pc += 2 * int(program[pc]-(opPUSHW000-1)) + } + return pc, true +} + +// f2dot14 is a 2.14 fixed point number. +type f2dot14 int16 + +func normalize(x, y f2dot14) [2]f2dot14 { + fx, fy := float64(x), float64(y) + l := 0x4000 / math.Hypot(fx, fy) + fx *= l + if fx >= 0 { + fx += 0.5 + } else { + fx -= 0.5 + } + fy *= l + if fy >= 0 { + fy += 0.5 + } else { + fy -= 0.5 + } + return [2]f2dot14{f2dot14(fx), f2dot14(fy)} +} + +// f26dot6 is a 26.6 fixed point number. +type f26dot6 int32 + +// abs returns abs(x) in 26.6 fixed point arithmetic. +func (x f26dot6) abs() f26dot6 { + if x < 0 { + return -x + } + return x +} + +// div returns x/y in 26.6 fixed point arithmetic. +func (x f26dot6) div(y f26dot6) f26dot6 { + return f26dot6((int64(x) << 6) / int64(y)) +} + +// mul returns x*y in 26.6 fixed point arithmetic. +func (x f26dot6) mul(y f26dot6) f26dot6 { + return f26dot6((int64(x)*int64(y) + 1<<5) >> 6) +} + +// dotProduct returns the dot product of [x, y] and q. 
It is almost the same as +// px := int64(x) +// py := int64(y) +// qx := int64(q[0]) +// qy := int64(q[1]) +// return f26dot6((px*qx + py*qy + 1<<13) >> 14) +// except that the computation is done with 32-bit integers to produce exactly +// the same rounding behavior as C Freetype. +func dotProduct(x, y f26dot6, q [2]f2dot14) f26dot6 { + // Compute x*q[0] as 64-bit value. + l := uint32((int32(x) & 0xFFFF) * int32(q[0])) + m := (int32(x) >> 16) * int32(q[0]) + + lo1 := l + (uint32(m) << 16) + hi1 := (m >> 16) + (int32(l) >> 31) + bool2int32(lo1 < l) + + // Compute y*q[1] as 64-bit value. + l = uint32((int32(y) & 0xFFFF) * int32(q[1])) + m = (int32(y) >> 16) * int32(q[1]) + + lo2 := l + (uint32(m) << 16) + hi2 := (m >> 16) + (int32(l) >> 31) + bool2int32(lo2 < l) + + // Add them. + lo := lo1 + lo2 + hi := hi1 + hi2 + bool2int32(lo < lo1) + + // Divide the result by 2^14 with rounding. + s := hi >> 31 + l = lo + uint32(s) + hi += s + bool2int32(l < lo) + lo = l + + l = lo + 0x2000 + hi += bool2int32(l < lo) + + return f26dot6((uint32(hi) << 18) | (l >> 14)) +} + +// mulDiv returns x*y/z, rounded to the nearest integer. +func mulDiv(x, y, z int64) int64 { + xy := x * y + if z < 0 { + xy, z = -xy, -z + } + if xy >= 0 { + xy += z / 2 + } else { + xy -= z / 2 + } + return xy / z +} + +// round rounds the given number. The rounding algorithm is described at +// https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#rounding +func (h *hinter) round(x f26dot6) f26dot6 { + if h.gs.roundPeriod == 0 { + // Rounding is off. 
+ return x + } + if x >= 0 { + ret := x - h.gs.roundPhase + h.gs.roundThreshold + if h.gs.roundSuper45 { + ret /= h.gs.roundPeriod + ret *= h.gs.roundPeriod + } else { + ret &= -h.gs.roundPeriod + } + if x != 0 && ret < 0 { + ret = 0 + } + return ret + h.gs.roundPhase + } + ret := -x - h.gs.roundPhase + h.gs.roundThreshold + if h.gs.roundSuper45 { + ret /= h.gs.roundPeriod + ret *= h.gs.roundPeriod + } else { + ret &= -h.gs.roundPeriod + } + if ret < 0 { + ret = 0 + } + return -ret - h.gs.roundPhase +} + +func bool2int32(b bool) int32 { + if b { + return 1 + } + return 0 +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint_test.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint_test.go new file mode 100644 index 000000000..c8b8d604d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/hint_test.go @@ -0,0 +1,673 @@ +// Copyright 2012 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. 
+ +package truetype + +import ( + "reflect" + "strings" + "testing" +) + +func TestBytecode(t *testing.T) { + testCases := []struct { + desc string + prog []byte + want []int32 + errStr string + }{ + { + "underflow", + []byte{ + opDUP, + }, + nil, + "underflow", + }, + { + "infinite loop", + []byte{ + opPUSHW000, // [-1] + 0xff, + 0xff, + opDUP, // [-1, -1] + opJMPR, // [-1] + }, + nil, + "too many steps", + }, + { + "unbalanced if/else", + []byte{ + opPUSHB000, // [0] + 0, + opIF, + }, + nil, + "unbalanced", + }, + { + "vector set/gets", + []byte{ + opSVTCA1, // [] + opGPV, // [0x4000, 0] + opSVTCA0, // [0x4000, 0] + opGFV, // [0x4000, 0, 0, 0x4000] + opNEG, // [0x4000, 0, 0, -0x4000] + opSPVFS, // [0x4000, 0] + opSFVTPV, // [0x4000, 0] + opPUSHB000, // [0x4000, 0, 1] + 1, + opGFV, // [0x4000, 0, 1, 0, -0x4000] + opPUSHB000, // [0x4000, 0, 1, 0, -0x4000, 2] + 2, + }, + []int32{0x4000, 0, 1, 0, -0x4000, 2}, + "", + }, + { + "jumps", + []byte{ + opPUSHB001, // [10, 2] + 10, + 2, + opJMPR, // [10] + opDUP, // not executed + opDUP, // [10, 10] + opPUSHB010, // [10, 10, 20, 2, 1] + 20, + 2, + 1, + opJROT, // [10, 10, 20] + opDUP, // not executed + opDUP, // [10, 10, 20, 20] + opPUSHB010, // [10, 10, 20, 20, 30, 2, 1] + 30, + 2, + 1, + opJROF, // [10, 10, 20, 20, 30] + opDUP, // [10, 10, 20, 20, 30, 30] + opDUP, // [10, 10, 20, 20, 30, 30, 30] + }, + []int32{10, 10, 20, 20, 30, 30, 30}, + "", + }, + { + "stack ops", + []byte{ + opPUSHB010, // [10, 20, 30] + 10, + 20, + 30, + opCLEAR, // [] + opPUSHB010, // [40, 50, 60] + 40, + 50, + 60, + opSWAP, // [40, 60, 50] + opDUP, // [40, 60, 50, 50] + opDUP, // [40, 60, 50, 50, 50] + opPOP, // [40, 60, 50, 50] + opDEPTH, // [40, 60, 50, 50, 4] + opCINDEX, // [40, 60, 50, 50, 40] + opPUSHB000, // [40, 60, 50, 50, 40, 4] + 4, + opMINDEX, // [40, 50, 50, 40, 60] + }, + []int32{40, 50, 50, 40, 60}, + "", + }, + { + "push ops", + []byte{ + opPUSHB000, // [255] + 255, + opPUSHW001, // [255, -2, 253] + 255, + 254, + 0, + 253, + 
opNPUSHB, // [1, -2, 253, 1, 2] + 2, + 1, + 2, + opNPUSHW, // [1, -2, 253, 1, 2, 0x0405, 0x0607, 0x0809] + 3, + 4, + 5, + 6, + 7, + 8, + 9, + }, + []int32{255, -2, 253, 1, 2, 0x0405, 0x0607, 0x0809}, + "", + }, + { + "store ops", + []byte{ + opPUSHB011, // [1, 22, 3, 44] + 1, + 22, + 3, + 44, + opWS, // [1, 22] + opWS, // [] + opPUSHB000, // [3] + 3, + opRS, // [44] + }, + []int32{44}, + "", + }, + { + "comparison ops", + []byte{ + opPUSHB001, // [10, 20] + 10, + 20, + opLT, // [1] + opPUSHB001, // [1, 10, 20] + 10, + 20, + opLTEQ, // [1, 1] + opPUSHB001, // [1, 1, 10, 20] + 10, + 20, + opGT, // [1, 1, 0] + opPUSHB001, // [1, 1, 0, 10, 20] + 10, + 20, + opGTEQ, // [1, 1, 0, 0] + opEQ, // [1, 1, 1] + opNEQ, // [1, 0] + }, + []int32{1, 0}, + "", + }, + { + "odd/even", + // Calculate odd(2+31/64), odd(2+32/64), even(2), even(1). + []byte{ + opPUSHB000, // [159] + 159, + opODD, // [0] + opPUSHB000, // [0, 160] + 160, + opODD, // [0, 1] + opPUSHB000, // [0, 1, 128] + 128, + opEVEN, // [0, 1, 1] + opPUSHB000, // [0, 1, 1, 64] + 64, + opEVEN, // [0, 1, 1, 0] + }, + []int32{0, 1, 1, 0}, + "", + }, + { + "if true", + []byte{ + opPUSHB001, // [255, 1] + 255, + 1, + opIF, + opPUSHB000, // [255, 2] + 2, + opEIF, + opPUSHB000, // [255, 2, 254] + 254, + }, + []int32{255, 2, 254}, + "", + }, + { + "if false", + []byte{ + opPUSHB001, // [255, 0] + 255, + 0, + opIF, + opPUSHB000, // [255] + 2, + opEIF, + opPUSHB000, // [255, 254] + 254, + }, + []int32{255, 254}, + "", + }, + { + "if/else true", + []byte{ + opPUSHB000, // [1] + 1, + opIF, + opPUSHB000, // [2] + 2, + opELSE, + opPUSHB000, // not executed + 3, + opEIF, + }, + []int32{2}, + "", + }, + { + "if/else false", + []byte{ + opPUSHB000, // [0] + 0, + opIF, + opPUSHB000, // not executed + 2, + opELSE, + opPUSHB000, // [3] + 3, + opEIF, + }, + []int32{3}, + "", + }, + { + "if/else true if/else false", + // 0x58 is the opcode for opIF. The literal 0x58s below are pushed data. 
+ []byte{ + opPUSHB010, // [255, 0, 1] + 255, + 0, + 1, + opIF, + opIF, + opPUSHB001, // not executed + 0x58, + 0x58, + opELSE, + opPUSHW000, // [255, 0x5858] + 0x58, + 0x58, + opEIF, + opELSE, + opIF, + opNPUSHB, // not executed + 3, + 0x58, + 0x58, + 0x58, + opELSE, + opNPUSHW, // not executed + 2, + 0x58, + 0x58, + 0x58, + 0x58, + opEIF, + opEIF, + opPUSHB000, // [255, 0x5858, 254] + 254, + }, + []int32{255, 0x5858, 254}, + "", + }, + { + "if/else false if/else true", + // 0x58 is the opcode for opIF. The literal 0x58s below are pushed data. + []byte{ + opPUSHB010, // [255, 1, 0] + 255, + 1, + 0, + opIF, + opIF, + opPUSHB001, // not executed + 0x58, + 0x58, + opELSE, + opPUSHW000, // not executed + 0x58, + 0x58, + opEIF, + opELSE, + opIF, + opNPUSHB, // [255, 0x58, 0x58, 0x58] + 3, + 0x58, + 0x58, + 0x58, + opELSE, + opNPUSHW, // not executed + 2, + 0x58, + 0x58, + 0x58, + 0x58, + opEIF, + opEIF, + opPUSHB000, // [255, 0x58, 0x58, 0x58, 254] + 254, + }, + []int32{255, 0x58, 0x58, 0x58, 254}, + "", + }, + { + "logical ops", + []byte{ + opPUSHB010, // [0, 10, 20] + 0, + 10, + 20, + opAND, // [0, 1] + opOR, // [1] + opNOT, // [0] + }, + []int32{0}, + "", + }, + { + "arithmetic ops", + // Calculate abs((-(1 - (2*3)))/2 + 1/64). + // The answer is 5/2 + 1/64 in ideal numbers, or 161 in 26.6 fixed point math. + []byte{ + opPUSHB010, // [64, 128, 192] + 1 << 6, + 2 << 6, + 3 << 6, + opMUL, // [64, 384] + opSUB, // [-320] + opNEG, // [320] + opPUSHB000, // [320, 128] + 2 << 6, + opDIV, // [160] + opPUSHB000, // [160, 1] + 1, + opADD, // [161] + opABS, // [161] + }, + []int32{161}, + "", + }, + { + "floor, ceiling", + []byte{ + opPUSHB000, // [96] + 96, + opFLOOR, // [64] + opPUSHB000, // [64, 96] + 96, + opCEILING, // [64, 128] + }, + []int32{64, 128}, + "", + }, + { + "rounding", + // Round 1.40625 (which is 90/64) under various rounding policies. 
+ // See figure 20 of https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#rounding + []byte{ + opROFF, // [] + opPUSHB000, // [90] + 90, + opROUND00, // [90] + opRTG, // [90] + opPUSHB000, // [90, 90] + 90, + opROUND00, // [90, 64] + opRTHG, // [90, 64] + opPUSHB000, // [90, 64, 90] + 90, + opROUND00, // [90, 64, 96] + opRDTG, // [90, 64, 96] + opPUSHB000, // [90, 64, 96, 90] + 90, + opROUND00, // [90, 64, 96, 64] + opRUTG, // [90, 64, 96, 64] + opPUSHB000, // [90, 64, 96, 64, 90] + 90, + opROUND00, // [90, 64, 96, 64, 128] + opRTDG, // [90, 64, 96, 64, 128] + opPUSHB000, // [90, 64, 96, 64, 128, 90] + 90, + opROUND00, // [90, 64, 96, 64, 128, 96] + }, + []int32{90, 64, 96, 64, 128, 96}, + "", + }, + { + "super-rounding", + // See figure 20 of https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#rounding + // and the sign preservation steps of the "Order of rounding operations" section. + []byte{ + opPUSHB000, // [0x58] + 0x58, + opSROUND, // [] + opPUSHW000, // [-81] + 0xff, + 0xaf, + opROUND00, // [-80] + opPUSHW000, // [-80, -80] + 0xff, + 0xb0, + opROUND00, // [-80, -80] + opPUSHW000, // [-80, -80, -17] + 0xff, + 0xef, + opROUND00, // [-80, -80, -16] + opPUSHW000, // [-80, -80, -16, -16] + 0xff, + 0xf0, + opROUND00, // [-80, -80, -16, -16] + opPUSHB000, // [-80, -80, -16, -16, 0] + 0, + opROUND00, // [-80, -80, -16, -16, 16] + opPUSHB000, // [-80, -80, -16, -16, 16, 16] + 16, + opROUND00, // [-80, -80, -16, -16, 16, 16] + opPUSHB000, // [-80, -80, -16, -16, 16, 16, 47] + 47, + opROUND00, // [-80, -80, -16, -16, 16, 16, 16] + opPUSHB000, // [-80, -80, -16, -16, 16, 16, 16, 48] + 48, + opROUND00, // [-80, -80, -16, -16, 16, 16, 16, 80] + }, + []int32{-80, -80, -16, -16, 16, 16, 16, 80}, + "", + }, + { + "roll", + []byte{ + opPUSHB010, // [1, 2, 3] + 1, + 2, + 3, + opROLL, // [2, 3, 1] + }, + []int32{2, 3, 1}, + "", + }, + { + "max/min", + []byte{ + opPUSHW001, // [-2, -3] + 0xff, + 0xfe, + 0xff, + 0xfd, + opMAX, // [-2] + opPUSHW001, // [-2, -4, -5] 
+ 0xff, + 0xfc, + 0xff, + 0xfb, + opMIN, // [-2, -5] + }, + []int32{-2, -5}, + "", + }, + { + "functions", + []byte{ + opPUSHB011, // [3, 7, 0, 3] + 3, + 7, + 0, + 3, + + opFDEF, // Function #3 (not called) + opPUSHB000, + 98, + opENDF, + + opFDEF, // Function #0 + opDUP, + opADD, + opENDF, + + opFDEF, // Function #7 + opPUSHB001, + 10, + 0, + opCALL, + opDUP, + opENDF, + + opFDEF, // Function #3 (again) + opPUSHB000, + 99, + opENDF, + + opPUSHB001, // [2, 0] + 2, + 0, + opCALL, // [4] + opPUSHB000, // [4, 3] + 3, + opLOOPCALL, // [99, 99, 99, 99] + opPUSHB000, // [99, 99, 99, 99, 7] + 7, + opCALL, // [99, 99, 99, 99, 20, 20] + }, + []int32{99, 99, 99, 99, 20, 20}, + "", + }, + } + + for _, tc := range testCases { + h := &hinter{} + h.init(&Font{ + maxStorage: 32, + maxStackElements: 100, + }, 768) + err, errStr := h.run(tc.prog, nil, nil, nil, nil), "" + if err != nil { + errStr = err.Error() + } + if tc.errStr != "" { + if errStr == "" { + t.Errorf("%s: got no error, want %q", tc.desc, tc.errStr) + } else if !strings.Contains(errStr, tc.errStr) { + t.Errorf("%s: got error %q, want one containing %q", tc.desc, errStr, tc.errStr) + } + continue + } + if errStr != "" { + t.Errorf("%s: got error %q, want none", tc.desc, errStr) + continue + } + got := h.stack[:len(tc.want)] + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want) + continue + } + } +} + +// TestMove tests that the hinter.move method matches the output of the C +// Freetype implementation. 
+func TestMove(t *testing.T) { + h, p := hinter{}, Point{} + testCases := []struct { + pvX, pvY, fvX, fvY f2dot14 + wantX, wantY int32 + }{ + {+0x4000, +0x0000, +0x4000, +0x0000, +1000, +0}, + {+0x4000, +0x0000, -0x4000, +0x0000, +1000, +0}, + {-0x4000, +0x0000, +0x4000, +0x0000, -1000, +0}, + {-0x4000, +0x0000, -0x4000, +0x0000, -1000, +0}, + {+0x0000, +0x4000, +0x0000, +0x4000, +0, +1000}, + {+0x0000, +0x4000, +0x0000, -0x4000, +0, +1000}, + {+0x4000, +0x0000, +0x2d41, +0x2d41, +1000, +1000}, + {+0x4000, +0x0000, -0x2d41, +0x2d41, +1000, -1000}, + {+0x4000, +0x0000, +0x2d41, -0x2d41, +1000, -1000}, + {+0x4000, +0x0000, -0x2d41, -0x2d41, +1000, +1000}, + {-0x4000, +0x0000, +0x2d41, +0x2d41, -1000, -1000}, + {-0x4000, +0x0000, -0x2d41, +0x2d41, -1000, +1000}, + {-0x4000, +0x0000, +0x2d41, -0x2d41, -1000, +1000}, + {-0x4000, +0x0000, -0x2d41, -0x2d41, -1000, -1000}, + {+0x376d, +0x2000, +0x2d41, +0x2d41, +732, +732}, + {-0x376d, +0x2000, +0x2d41, +0x2d41, -2732, -2732}, + {+0x376d, +0x2000, +0x2d41, -0x2d41, +2732, -2732}, + {-0x376d, +0x2000, +0x2d41, -0x2d41, -732, +732}, + {-0x376d, -0x2000, +0x2d41, +0x2d41, -732, -732}, + {+0x376d, +0x2000, +0x4000, +0x0000, +1155, +0}, + {+0x376d, +0x2000, +0x0000, +0x4000, +0, +2000}, + } + for _, tc := range testCases { + p = Point{} + h.gs.pv = [2]f2dot14{tc.pvX, tc.pvY} + h.gs.fv = [2]f2dot14{tc.fvX, tc.fvY} + h.move(&p, 1000, true) + tx := p.Flags&flagTouchedX != 0 + ty := p.Flags&flagTouchedY != 0 + wantTX := tc.fvX != 0 + wantTY := tc.fvY != 0 + if p.X != tc.wantX || p.Y != tc.wantY || tx != wantTX || ty != wantTY { + t.Errorf("pv=%v, fv=%v\ngot %d, %d, %t, %t\nwant %d, %d, %t, %t", + h.gs.pv, h.gs.fv, p.X, p.Y, tx, ty, tc.wantX, tc.wantY, wantTX, wantTY) + continue + } + + // Check that p is aligned with the freedom vector. 
+ a := int64(p.X) * int64(tc.fvY) + b := int64(p.Y) * int64(tc.fvX) + if a != b { + t.Errorf("pv=%v, fv=%v, p=%v not aligned with fv", h.gs.pv, h.gs.fv, p) + continue + } + + // Check that the projected p is 1000 away from the origin. + dotProd := (int64(p.X)*int64(tc.pvX) + int64(p.Y)*int64(tc.pvY) + 1<<13) >> 14 + if dotProd != 1000 { + t.Errorf("pv=%v, fv=%v, p=%v not 1000 from origin", h.gs.pv, h.gs.fv, p) + continue + } + } +} + +// TestNormalize tests that the normalize function matches the output of the C +// Freetype implementation. +func TestNormalize(t *testing.T) { + testCases := [][2]f2dot14{ + {-15895, 3974}, + {-15543, 5181}, + {-14654, 7327}, + {-11585, 11585}, + {0, 16384}, + {11585, 11585}, + {14654, 7327}, + {15543, 5181}, + {15895, 3974}, + {16066, 3213}, + {16161, 2694}, + {16219, 2317}, + {16257, 2032}, + {16284, 1809}, + } + for i, want := range testCases { + got := normalize(f2dot14(i)-4, 1) + if got != want { + t.Errorf("i=%d: got %v, want %v", i, got, want) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/opcodes.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/opcodes.go new file mode 100644 index 000000000..1880e1e63 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/opcodes.go @@ -0,0 +1,289 @@ +// Copyright 2012 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package truetype + +// The Truetype opcodes are summarized at +// https://developer.apple.com/fonts/TTRefMan/RM07/appendixA.html + +const ( + opSVTCA0 = 0x00 // Set freedom and projection Vectors To Coordinate Axis + opSVTCA1 = 0x01 // . + opSPVTCA0 = 0x02 // Set Projection Vector To Coordinate Axis + opSPVTCA1 = 0x03 // . 
+ opSFVTCA0 = 0x04 // Set Freedom Vector to Coordinate Axis + opSFVTCA1 = 0x05 // . + opSPVTL0 = 0x06 // Set Projection Vector To Line + opSPVTL1 = 0x07 // . + opSFVTL0 = 0x08 // Set Freedom Vector To Line + opSFVTL1 = 0x09 // . + opSPVFS = 0x0a // Set Projection Vector From Stack + opSFVFS = 0x0b // Set Freedom Vector From Stack + opGPV = 0x0c // Get Projection Vector + opGFV = 0x0d // Get Freedom Vector + opSFVTPV = 0x0e // Set Freedom Vector To Projection Vector + opISECT = 0x0f // moves point p to the InterSECTion of two lines + opSRP0 = 0x10 // Set Reference Point 0 + opSRP1 = 0x11 // Set Reference Point 1 + opSRP2 = 0x12 // Set Reference Point 2 + opSZP0 = 0x13 // Set Zone Pointer 0 + opSZP1 = 0x14 // Set Zone Pointer 1 + opSZP2 = 0x15 // Set Zone Pointer 2 + opSZPS = 0x16 // Set Zone PointerS + opSLOOP = 0x17 // Set LOOP variable + opRTG = 0x18 // Round To Grid + opRTHG = 0x19 // Round To Half Grid + opSMD = 0x1a // Set Minimum Distance + opELSE = 0x1b // ELSE clause + opJMPR = 0x1c // JuMP Relative + opSCVTCI = 0x1d // Set Control Value Table Cut-In + opSSWCI = 0x1e // Set Single Width Cut-In + opSSW = 0x1f // Set Single Width + opDUP = 0x20 // DUPlicate top stack element + opPOP = 0x21 // POP top stack element + opCLEAR = 0x22 // CLEAR the stack + opSWAP = 0x23 // SWAP the top two elements on the stack + opDEPTH = 0x24 // DEPTH of the stack + opCINDEX = 0x25 // Copy the INDEXed element to the top of the stack + opMINDEX = 0x26 // Move the INDEXed element to the top of the stack + opALIGNPTS = 0x27 // ALIGN PoinTS + op_0x28 = 0x28 // deprecated + opUTP = 0x29 // UnTouch Point + opLOOPCALL = 0x2a // LOOP and CALL function + opCALL = 0x2b // CALL function + opFDEF = 0x2c // Function DEFinition + opENDF = 0x2d // END Function definition + opMDAP0 = 0x2e // Move Direct Absolute Point + opMDAP1 = 0x2f // . + opIUP0 = 0x30 // Interpolate Untouched Points through the outline + opIUP1 = 0x31 // . 
+ opSHP0 = 0x32 // SHift Point using reference point + opSHP1 = 0x33 // . + opSHC0 = 0x34 // SHift Contour using reference point + opSHC1 = 0x35 // . + opSHZ0 = 0x36 // SHift Zone using reference point + opSHZ1 = 0x37 // . + opSHPIX = 0x38 // SHift point by a PIXel amount + opIP = 0x39 // Interpolate Point + opMSIRP0 = 0x3a // Move Stack Indirect Relative Point + opMSIRP1 = 0x3b // . + opALIGNRP = 0x3c // ALIGN to Reference Point + opRTDG = 0x3d // Round To Double Grid + opMIAP0 = 0x3e // Move Indirect Absolute Point + opMIAP1 = 0x3f // . + opNPUSHB = 0x40 // PUSH N Bytes + opNPUSHW = 0x41 // PUSH N Words + opWS = 0x42 // Write Store + opRS = 0x43 // Read Store + opWCVTP = 0x44 // Write Control Value Table in Pixel units + opRCVT = 0x45 // Read Control Value Table entry + opGC0 = 0x46 // Get Coordinate projected onto the projection vector + opGC1 = 0x47 // . + opSCFS = 0x48 // Sets Coordinate From the Stack using projection vector and freedom vector + opMD0 = 0x49 // Measure Distance + opMD1 = 0x4a // . 
+ opMPPEM = 0x4b // Measure Pixels Per EM + opMPS = 0x4c // Measure Point Size + opFLIPON = 0x4d // set the auto FLIP Boolean to ON + opFLIPOFF = 0x4e // set the auto FLIP Boolean to OFF + opDEBUG = 0x4f // DEBUG call + opLT = 0x50 // Less Than + opLTEQ = 0x51 // Less Than or EQual + opGT = 0x52 // Greater Than + opGTEQ = 0x53 // Greater Than or EQual + opEQ = 0x54 // EQual + opNEQ = 0x55 // Not EQual + opODD = 0x56 // ODD + opEVEN = 0x57 // EVEN + opIF = 0x58 // IF test + opEIF = 0x59 // End IF + opAND = 0x5a // logical AND + opOR = 0x5b // logical OR + opNOT = 0x5c // logical NOT + opDELTAP1 = 0x5d // DELTA exception P1 + opSDB = 0x5e // Set Delta Base in the graphics state + opSDS = 0x5f // Set Delta Shift in the graphics state + opADD = 0x60 // ADD + opSUB = 0x61 // SUBtract + opDIV = 0x62 // DIVide + opMUL = 0x63 // MULtiply + opABS = 0x64 // ABSolute value + opNEG = 0x65 // NEGate + opFLOOR = 0x66 // FLOOR + opCEILING = 0x67 // CEILING + opROUND00 = 0x68 // ROUND value + opROUND01 = 0x69 // . + opROUND10 = 0x6a // . + opROUND11 = 0x6b // . + opNROUND00 = 0x6c // No ROUNDing of value + opNROUND01 = 0x6d // . + opNROUND10 = 0x6e // . + opNROUND11 = 0x6f // . 
+ opWCVTF = 0x70 // Write Control Value Table in Funits + opDELTAP2 = 0x71 // DELTA exception P2 + opDELTAP3 = 0x72 // DELTA exception P3 + opDELTAC1 = 0x73 // DELTA exception C1 + opDELTAC2 = 0x74 // DELTA exception C2 + opDELTAC3 = 0x75 // DELTA exception C3 + opSROUND = 0x76 // Super ROUND + opS45ROUND = 0x77 // Super ROUND 45 degrees + opJROT = 0x78 // Jump Relative On True + opJROF = 0x79 // Jump Relative On False + opROFF = 0x7a // Round OFF + op_0x7b = 0x7b // deprecated + opRUTG = 0x7c // Round Up To Grid + opRDTG = 0x7d // Round Down To Grid + opSANGW = 0x7e // Set ANGle Weight + opAA = 0x7f // Adjust Angle + opFLIPPT = 0x80 // FLIP PoinT + opFLIPRGON = 0x81 // FLIP RanGe ON + opFLIPRGOFF = 0x82 // FLIP RanGe OFF + op_0x83 = 0x83 // deprecated + op_0x84 = 0x84 // deprecated + opSCANCTRL = 0x85 // SCAN conversion ConTRoL + opSDPVTL0 = 0x86 // Set Dual Projection Vector To Line + opSDPVTL1 = 0x87 // . + opGETINFO = 0x88 // GET INFOrmation + opIDEF = 0x89 // Instruction DEFinition + opROLL = 0x8a // ROLL the top three stack elements + opMAX = 0x8b // MAXimum of top two stack elements + opMIN = 0x8c // MINimum of top two stack elements + opSCANTYPE = 0x8d // SCANTYPE + opINSTCTRL = 0x8e // INSTRuction execution ConTRoL + op_0x8f = 0x8f + op_0x90 = 0x90 + op_0x91 = 0x91 + op_0x92 = 0x92 + op_0x93 = 0x93 + op_0x94 = 0x94 + op_0x95 = 0x95 + op_0x96 = 0x96 + op_0x97 = 0x97 + op_0x98 = 0x98 + op_0x99 = 0x99 + op_0x9a = 0x9a + op_0x9b = 0x9b + op_0x9c = 0x9c + op_0x9d = 0x9d + op_0x9e = 0x9e + op_0x9f = 0x9f + op_0xa0 = 0xa0 + op_0xa1 = 0xa1 + op_0xa2 = 0xa2 + op_0xa3 = 0xa3 + op_0xa4 = 0xa4 + op_0xa5 = 0xa5 + op_0xa6 = 0xa6 + op_0xa7 = 0xa7 + op_0xa8 = 0xa8 + op_0xa9 = 0xa9 + op_0xaa = 0xaa + op_0xab = 0xab + op_0xac = 0xac + op_0xad = 0xad + op_0xae = 0xae + op_0xaf = 0xaf + opPUSHB000 = 0xb0 // PUSH Bytes + opPUSHB001 = 0xb1 // . + opPUSHB010 = 0xb2 // . + opPUSHB011 = 0xb3 // . + opPUSHB100 = 0xb4 // . + opPUSHB101 = 0xb5 // . + opPUSHB110 = 0xb6 // . 
+ opPUSHB111 = 0xb7 // . + opPUSHW000 = 0xb8 // PUSH Words + opPUSHW001 = 0xb9 // . + opPUSHW010 = 0xba // . + opPUSHW011 = 0xbb // . + opPUSHW100 = 0xbc // . + opPUSHW101 = 0xbd // . + opPUSHW110 = 0xbe // . + opPUSHW111 = 0xbf // . + opMDRP00000 = 0xc0 // Move Direct Relative Point + opMDRP00001 = 0xc1 // . + opMDRP00010 = 0xc2 // . + opMDRP00011 = 0xc3 // . + opMDRP00100 = 0xc4 // . + opMDRP00101 = 0xc5 // . + opMDRP00110 = 0xc6 // . + opMDRP00111 = 0xc7 // . + opMDRP01000 = 0xc8 // . + opMDRP01001 = 0xc9 // . + opMDRP01010 = 0xca // . + opMDRP01011 = 0xcb // . + opMDRP01100 = 0xcc // . + opMDRP01101 = 0xcd // . + opMDRP01110 = 0xce // . + opMDRP01111 = 0xcf // . + opMDRP10000 = 0xd0 // . + opMDRP10001 = 0xd1 // . + opMDRP10010 = 0xd2 // . + opMDRP10011 = 0xd3 // . + opMDRP10100 = 0xd4 // . + opMDRP10101 = 0xd5 // . + opMDRP10110 = 0xd6 // . + opMDRP10111 = 0xd7 // . + opMDRP11000 = 0xd8 // . + opMDRP11001 = 0xd9 // . + opMDRP11010 = 0xda // . + opMDRP11011 = 0xdb // . + opMDRP11100 = 0xdc // . + opMDRP11101 = 0xdd // . + opMDRP11110 = 0xde // . + opMDRP11111 = 0xdf // . + opMIRP00000 = 0xe0 // Move Indirect Relative Point + opMIRP00001 = 0xe1 // . + opMIRP00010 = 0xe2 // . + opMIRP00011 = 0xe3 // . + opMIRP00100 = 0xe4 // . + opMIRP00101 = 0xe5 // . + opMIRP00110 = 0xe6 // . + opMIRP00111 = 0xe7 // . + opMIRP01000 = 0xe8 // . + opMIRP01001 = 0xe9 // . + opMIRP01010 = 0xea // . + opMIRP01011 = 0xeb // . + opMIRP01100 = 0xec // . + opMIRP01101 = 0xed // . + opMIRP01110 = 0xee // . + opMIRP01111 = 0xef // . + opMIRP10000 = 0xf0 // . + opMIRP10001 = 0xf1 // . + opMIRP10010 = 0xf2 // . + opMIRP10011 = 0xf3 // . + opMIRP10100 = 0xf4 // . + opMIRP10101 = 0xf5 // . + opMIRP10110 = 0xf6 // . + opMIRP10111 = 0xf7 // . + opMIRP11000 = 0xf8 // . + opMIRP11001 = 0xf9 // . + opMIRP11010 = 0xfa // . + opMIRP11011 = 0xfb // . + opMIRP11100 = 0xfc // . + opMIRP11101 = 0xfd // . + opMIRP11110 = 0xfe // . + opMIRP11111 = 0xff // . 
+) + +// popCount is the number of stack elements that each opcode pops. +var popCount = [256]uint8{ + // 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f + 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 5, // 0x00 - 0x0f + 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, // 0x10 - 0x1f + 1, 1, 0, 2, 0, 1, 1, 2, 0, 1, 2, 1, 1, 0, 1, 1, // 0x20 - 0x2f + 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 2, 2, 0, 0, 2, 2, // 0x30 - 0x3f + 0, 0, 2, 1, 2, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, // 0x40 - 0x4f + 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 2, 2, 1, 1, 1, 1, // 0x50 - 0x5f + 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x60 - 0x6f + 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 0, 0, 0, 0, 1, 1, // 0x70 - 0x7f + 0, 2, 2, 0, 0, 1, 2, 2, 1, 1, 3, 2, 2, 1, 2, 0, // 0x80 - 0x8f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x90 - 0x9f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xa0 - 0xaf + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xb0 - 0xbf + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0xc0 - 0xcf + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0xd0 - 0xdf + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xe0 - 0xef + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xf0 - 0xff +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype.go new file mode 100644 index 000000000..96ceef547 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype.go @@ -0,0 +1,554 @@ +// Copyright 2010 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +// Package truetype provides a parser for the TTF and TTC file formats. 
+// Those formats are documented at http://developer.apple.com/fonts/TTRefMan/ +// and http://www.microsoft.com/typography/otspec/ +// +// Some of a font's methods provide lengths or co-ordinates, e.g. bounds, font +// metrics and control points. All these methods take a scale parameter, which +// is the number of device units in 1 em. For example, if 1 em is 10 pixels and +// 1 pixel is 64 units, then scale is 640. If the device space involves pixels, +// 64 units per pixel is recommended, since that is what the bytecode hinter +// uses when snapping point co-ordinates to the pixel grid. +// +// To measure a TrueType font in ideal FUnit space, use scale equal to +// font.FUnitsPerEm(). +package truetype + +import ( + "fmt" +) + +// An Index is a Font's index of a rune. +type Index uint16 + +// A Bounds holds the co-ordinate range of one or more glyphs. +// The endpoints are inclusive. +type Bounds struct { + XMin, YMin, XMax, YMax int32 +} + +// An HMetric holds the horizontal metrics of a single glyph. +type HMetric struct { + AdvanceWidth, LeftSideBearing int32 +} + +// A VMetric holds the vertical metrics of a single glyph. +type VMetric struct { + AdvanceHeight, TopSideBearing int32 +} + +// A FormatError reports that the input is not a valid TrueType font. +type FormatError string + +func (e FormatError) Error() string { + return "freetype: invalid TrueType format: " + string(e) +} + +// An UnsupportedError reports that the input uses a valid but unimplemented +// TrueType feature. +type UnsupportedError string + +func (e UnsupportedError) Error() string { + return "freetype: unsupported TrueType feature: " + string(e) +} + +// u32 returns the big-endian uint32 at b[i:]. +func u32(b []byte, i int) uint32 { + return uint32(b[i])<<24 | uint32(b[i+1])<<16 | uint32(b[i+2])<<8 | uint32(b[i+3]) +} + +// u16 returns the big-endian uint16 at b[i:]. 
+func u16(b []byte, i int) uint16 { + return uint16(b[i])<<8 | uint16(b[i+1]) +} + +// readTable returns a slice of the TTF data given by a table's directory entry. +func readTable(ttf []byte, offsetLength []byte) ([]byte, error) { + offset := int(u32(offsetLength, 0)) + if offset < 0 { + return nil, FormatError(fmt.Sprintf("offset too large: %d", uint32(offset))) + } + length := int(u32(offsetLength, 4)) + if length < 0 { + return nil, FormatError(fmt.Sprintf("length too large: %d", uint32(length))) + } + end := offset + length + if end < 0 || end > len(ttf) { + return nil, FormatError(fmt.Sprintf("offset + length too large: %d", uint32(offset)+uint32(length))) + } + return ttf[offset:end], nil +} + +const ( + locaOffsetFormatUnknown int = iota + locaOffsetFormatShort + locaOffsetFormatLong +) + +// A cm holds a parsed cmap entry. +type cm struct { + start, end, delta, offset uint32 +} + +// A Font represents a Truetype font. +type Font struct { + // Tables sliced from the TTF data. The different tables are documented + // at http://developer.apple.com/fonts/TTRefMan/RM06/Chap6.html + cmap, cvt, fpgm, glyf, hdmx, head, hhea, hmtx, kern, loca, maxp, os2, prep, vmtx []byte + + cmapIndexes []byte + + // Cached values derived from the raw ttf data. + cm []cm + locaOffsetFormat int + nGlyph, nHMetric, nKern int + fUnitsPerEm int32 + bounds Bounds + // Values from the maxp section. + maxTwilightPoints, maxStorage, maxFunctionDefs, maxStackElements uint16 +} + +func (f *Font) parseCmap() error { + const ( + cmapFormat4 = 4 + cmapFormat12 = 12 + languageIndependent = 0 + + // A 32-bit encoding consists of a most-significant 16-bit Platform ID and a + // least-significant 16-bit Platform Specific ID. 
The magic numbers are + // specified at https://www.microsoft.com/typography/otspec/name.htm + unicodeEncoding = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0) + microsoftSymbolEncoding = 0x00030000 // PID = 3 (Microsoft), PSID = 0 (Symbol) + microsoftUCS2Encoding = 0x00030001 // PID = 3 (Microsoft), PSID = 1 (UCS-2) + microsoftUCS4Encoding = 0x0003000a // PID = 3 (Microsoft), PSID = 10 (UCS-4) + ) + + if len(f.cmap) < 4 { + return FormatError("cmap too short") + } + nsubtab := int(u16(f.cmap, 2)) + if len(f.cmap) < 8*nsubtab+4 { + return FormatError("cmap too short") + } + offset, found, x := 0, false, 4 + for i := 0; i < nsubtab; i++ { + // We read the 16-bit Platform ID and 16-bit Platform Specific ID as a single uint32. + // All values are big-endian. + pidPsid, o := u32(f.cmap, x), u32(f.cmap, x+4) + x += 8 + // We prefer the Unicode cmap encoding. Failing to find that, we fall + // back onto the Microsoft cmap encoding. + if pidPsid == unicodeEncoding { + offset, found = int(o), true + break + + } else if pidPsid == microsoftSymbolEncoding || + pidPsid == microsoftUCS2Encoding || + pidPsid == microsoftUCS4Encoding { + + offset, found = int(o), true + // We don't break out of the for loop, so that Unicode can override Microsoft. 
+ } + } + if !found { + return UnsupportedError("cmap encoding") + } + if offset <= 0 || offset > len(f.cmap) { + return FormatError("bad cmap offset") + } + + cmapFormat := u16(f.cmap, offset) + switch cmapFormat { + case cmapFormat4: + language := u16(f.cmap, offset+4) + if language != languageIndependent { + return UnsupportedError(fmt.Sprintf("language: %d", language)) + } + segCountX2 := int(u16(f.cmap, offset+6)) + if segCountX2%2 == 1 { + return FormatError(fmt.Sprintf("bad segCountX2: %d", segCountX2)) + } + segCount := segCountX2 / 2 + offset += 14 + f.cm = make([]cm, segCount) + for i := 0; i < segCount; i++ { + f.cm[i].end = uint32(u16(f.cmap, offset)) + offset += 2 + } + offset += 2 + for i := 0; i < segCount; i++ { + f.cm[i].start = uint32(u16(f.cmap, offset)) + offset += 2 + } + for i := 0; i < segCount; i++ { + f.cm[i].delta = uint32(u16(f.cmap, offset)) + offset += 2 + } + for i := 0; i < segCount; i++ { + f.cm[i].offset = uint32(u16(f.cmap, offset)) + offset += 2 + } + f.cmapIndexes = f.cmap[offset:] + return nil + + case cmapFormat12: + if u16(f.cmap, offset+2) != 0 { + return FormatError(fmt.Sprintf("cmap format: % x", f.cmap[offset:offset+4])) + } + length := u32(f.cmap, offset+4) + language := u32(f.cmap, offset+8) + if language != languageIndependent { + return UnsupportedError(fmt.Sprintf("language: %d", language)) + } + nGroups := u32(f.cmap, offset+12) + if length != 12*nGroups+16 { + return FormatError("inconsistent cmap length") + } + offset += 16 + f.cm = make([]cm, nGroups) + for i := uint32(0); i < nGroups; i++ { + f.cm[i].start = u32(f.cmap, offset+0) + f.cm[i].end = u32(f.cmap, offset+4) + f.cm[i].delta = u32(f.cmap, offset+8) - f.cm[i].start + offset += 12 + } + return nil + } + return UnsupportedError(fmt.Sprintf("cmap format: %d", cmapFormat)) +} + +func (f *Font) parseHead() error { + if len(f.head) != 54 { + return FormatError(fmt.Sprintf("bad head length: %d", len(f.head))) + } + f.fUnitsPerEm = int32(u16(f.head, 18)) + 
f.bounds.XMin = int32(int16(u16(f.head, 36))) + f.bounds.YMin = int32(int16(u16(f.head, 38))) + f.bounds.XMax = int32(int16(u16(f.head, 40))) + f.bounds.YMax = int32(int16(u16(f.head, 42))) + switch i := u16(f.head, 50); i { + case 0: + f.locaOffsetFormat = locaOffsetFormatShort + case 1: + f.locaOffsetFormat = locaOffsetFormatLong + default: + return FormatError(fmt.Sprintf("bad indexToLocFormat: %d", i)) + } + return nil +} + +func (f *Font) parseHhea() error { + if len(f.hhea) != 36 { + return FormatError(fmt.Sprintf("bad hhea length: %d", len(f.hhea))) + } + f.nHMetric = int(u16(f.hhea, 34)) + if 4*f.nHMetric+2*(f.nGlyph-f.nHMetric) != len(f.hmtx) { + return FormatError(fmt.Sprintf("bad hmtx length: %d", len(f.hmtx))) + } + return nil +} + +func (f *Font) parseKern() error { + // Apple's TrueType documentation (http://developer.apple.com/fonts/TTRefMan/RM06/Chap6kern.html) says: + // "Previous versions of the 'kern' table defined both the version and nTables fields in the header + // as UInt16 values and not UInt32 values. Use of the older format on the Mac OS is discouraged + // (although AAT can sense an old kerning table and still make correct use of it). Microsoft + // Windows still uses the older format for the 'kern' table and will not recognize the newer one. + // Fonts targeted for the Mac OS only should use the new format; fonts targeted for both the Mac OS + // and Windows should use the old format." + // Since we expect that almost all fonts aim to be Windows-compatible, we only parse the "older" format, + // just like the C Freetype implementation. 
+ if len(f.kern) == 0 { + if f.nKern != 0 { + return FormatError("bad kern table length") + } + return nil + } + if len(f.kern) < 18 { + return FormatError("kern data too short") + } + version, offset := u16(f.kern, 0), 2 + if version != 0 { + return UnsupportedError(fmt.Sprintf("kern version: %d", version)) + } + n, offset := u16(f.kern, offset), offset+2 + if n != 1 { + return UnsupportedError(fmt.Sprintf("kern nTables: %d", n)) + } + offset += 2 + length, offset := int(u16(f.kern, offset)), offset+2 + coverage, offset := u16(f.kern, offset), offset+2 + if coverage != 0x0001 { + // We only support horizontal kerning. + return UnsupportedError(fmt.Sprintf("kern coverage: 0x%04x", coverage)) + } + f.nKern, offset = int(u16(f.kern, offset)), offset+2 + if 6*f.nKern != length-14 { + return FormatError("bad kern table length") + } + return nil +} + +func (f *Font) parseMaxp() error { + if len(f.maxp) != 32 { + return FormatError(fmt.Sprintf("bad maxp length: %d", len(f.maxp))) + } + f.nGlyph = int(u16(f.maxp, 4)) + f.maxTwilightPoints = u16(f.maxp, 16) + f.maxStorage = u16(f.maxp, 18) + f.maxFunctionDefs = u16(f.maxp, 20) + f.maxStackElements = u16(f.maxp, 24) + return nil +} + +// scale returns x divided by f.fUnitsPerEm, rounded to the nearest integer. +func (f *Font) scale(x int32) int32 { + if x >= 0 { + x += f.fUnitsPerEm / 2 + } else { + x -= f.fUnitsPerEm / 2 + } + return x / f.fUnitsPerEm +} + +// Bounds returns the union of a Font's glyphs' bounds. +func (f *Font) Bounds(scale int32) Bounds { + b := f.bounds + b.XMin = f.scale(scale * b.XMin) + b.YMin = f.scale(scale * b.YMin) + b.XMax = f.scale(scale * b.XMax) + b.YMax = f.scale(scale * b.YMax) + return b +} + +// FUnitsPerEm returns the number of FUnits in a Font's em-square's side. +func (f *Font) FUnitsPerEm() int32 { + return f.fUnitsPerEm +} + +// Index returns a Font's index for the given rune. 
+func (f *Font) Index(x rune) Index { + c := uint32(x) + for i, j := 0, len(f.cm); i < j; { + h := i + (j-i)/2 + cm := &f.cm[h] + if c < cm.start { + j = h + } else if cm.end < c { + i = h + 1 + } else if cm.offset == 0 { + return Index(c + cm.delta) + } else { + offset := int(cm.offset) + 2*(h-len(f.cm)+int(c-cm.start)) + return Index(u16(f.cmapIndexes, offset)) + } + } + return 0 +} + +// unscaledHMetric returns the unscaled horizontal metrics for the glyph with +// the given index. +func (f *Font) unscaledHMetric(i Index) (h HMetric) { + j := int(i) + if j < 0 || f.nGlyph <= j { + return HMetric{} + } + if j >= f.nHMetric { + p := 4 * (f.nHMetric - 1) + return HMetric{ + AdvanceWidth: int32(u16(f.hmtx, p)), + LeftSideBearing: int32(int16(u16(f.hmtx, p+2*(j-f.nHMetric)+4))), + } + } + return HMetric{ + AdvanceWidth: int32(u16(f.hmtx, 4*j)), + LeftSideBearing: int32(int16(u16(f.hmtx, 4*j+2))), + } +} + +// HMetric returns the horizontal metrics for the glyph with the given index. +func (f *Font) HMetric(scale int32, i Index) HMetric { + h := f.unscaledHMetric(i) + h.AdvanceWidth = f.scale(scale * h.AdvanceWidth) + h.LeftSideBearing = f.scale(scale * h.LeftSideBearing) + return h +} + +// unscaledVMetric returns the unscaled vertical metrics for the glyph with +// the given index. yMax is the top of the glyph's bounding box. +func (f *Font) unscaledVMetric(i Index, yMax int32) (v VMetric) { + j := int(i) + if j < 0 || f.nGlyph <= j { + return VMetric{} + } + if 4*j+4 <= len(f.vmtx) { + return VMetric{ + AdvanceHeight: int32(u16(f.vmtx, 4*j)), + TopSideBearing: int32(int16(u16(f.vmtx, 4*j+2))), + } + } + // The OS/2 table has grown over time. + // https://developer.apple.com/fonts/TTRefMan/RM06/Chap6OS2.html + // says that it was originally 68 bytes. 
Optional fields, including + // the ascender and descender, are described at + // http://www.microsoft.com/typography/otspec/os2.htm + if len(f.os2) >= 72 { + sTypoAscender := int32(int16(u16(f.os2, 68))) + sTypoDescender := int32(int16(u16(f.os2, 70))) + return VMetric{ + AdvanceHeight: sTypoAscender - sTypoDescender, + TopSideBearing: sTypoAscender - yMax, + } + } + return VMetric{ + AdvanceHeight: f.fUnitsPerEm, + TopSideBearing: 0, + } +} + +// VMetric returns the vertical metrics for the glyph with the given index. +func (f *Font) VMetric(scale int32, i Index) VMetric { + // TODO: should 0 be bounds.YMax? + v := f.unscaledVMetric(i, 0) + v.AdvanceHeight = f.scale(scale * v.AdvanceHeight) + v.TopSideBearing = f.scale(scale * v.TopSideBearing) + return v +} + +// Kerning returns the kerning for the given glyph pair. +func (f *Font) Kerning(scale int32, i0, i1 Index) int32 { + if f.nKern == 0 { + return 0 + } + g := uint32(i0)<<16 | uint32(i1) + lo, hi := 0, f.nKern + for lo < hi { + i := (lo + hi) / 2 + ig := u32(f.kern, 18+6*i) + if ig < g { + lo = i + 1 + } else if ig > g { + hi = i + } else { + return f.scale(scale * int32(int16(u16(f.kern, 22+6*i)))) + } + } + return 0 +} + +// Parse returns a new Font for the given TTF or TTC data. +// +// For TrueType Collections, the first font in the collection is parsed. +func Parse(ttf []byte) (font *Font, err error) { + return parse(ttf, 0) +} + +func parse(ttf []byte, offset int) (font *Font, err error) { + if len(ttf)-offset < 12 { + err = FormatError("TTF data is too short") + return + } + originalOffset := offset + magic, offset := u32(ttf, offset), offset+4 + switch magic { + case 0x00010000: + // No-op. + case 0x74746366: // "ttcf" as a big-endian uint32. + if originalOffset != 0 { + err = FormatError("recursive TTC") + return + } + ttcVersion, offset := u32(ttf, offset), offset+4 + if ttcVersion != 0x00010000 { + // TODO: support TTC version 2.0, once I have such a .ttc file to test with. 
+ err = FormatError("bad TTC version") + return + } + numFonts, offset := int(u32(ttf, offset)), offset+4 + if numFonts <= 0 { + err = FormatError("bad number of TTC fonts") + return + } + if len(ttf[offset:])/4 < numFonts { + err = FormatError("TTC offset table is too short") + return + } + // TODO: provide an API to select which font in a TrueType collection to return, + // not just the first one. This may require an API to parse a TTC's name tables, + // so users of this package can select the font in a TTC by name. + offset = int(u32(ttf, offset)) + if offset <= 0 || offset > len(ttf) { + err = FormatError("bad TTC offset") + return + } + return parse(ttf, offset) + default: + err = FormatError("bad TTF version") + return + } + n, offset := int(u16(ttf, offset)), offset+2 + if len(ttf) < 16*n+12 { + err = FormatError("TTF data is too short") + return + } + f := new(Font) + // Assign the table slices. + for i := 0; i < n; i++ { + x := 16*i + 12 + switch string(ttf[x : x+4]) { + case "cmap": + f.cmap, err = readTable(ttf, ttf[x+8:x+16]) + case "cvt ": + f.cvt, err = readTable(ttf, ttf[x+8:x+16]) + case "fpgm": + f.fpgm, err = readTable(ttf, ttf[x+8:x+16]) + case "glyf": + f.glyf, err = readTable(ttf, ttf[x+8:x+16]) + case "hdmx": + f.hdmx, err = readTable(ttf, ttf[x+8:x+16]) + case "head": + f.head, err = readTable(ttf, ttf[x+8:x+16]) + case "hhea": + f.hhea, err = readTable(ttf, ttf[x+8:x+16]) + case "hmtx": + f.hmtx, err = readTable(ttf, ttf[x+8:x+16]) + case "kern": + f.kern, err = readTable(ttf, ttf[x+8:x+16]) + case "loca": + f.loca, err = readTable(ttf, ttf[x+8:x+16]) + case "maxp": + f.maxp, err = readTable(ttf, ttf[x+8:x+16]) + case "OS/2": + f.os2, err = readTable(ttf, ttf[x+8:x+16]) + case "prep": + f.prep, err = readTable(ttf, ttf[x+8:x+16]) + case "vmtx": + f.vmtx, err = readTable(ttf, ttf[x+8:x+16]) + } + if err != nil { + return + } + } + // Parse and sanity-check the TTF data. 
+ if err = f.parseHead(); err != nil { + return + } + if err = f.parseMaxp(); err != nil { + return + } + if err = f.parseCmap(); err != nil { + return + } + if err = f.parseKern(); err != nil { + return + } + if err = f.parseHhea(); err != nil { + return + } + font = f + return +} diff --git a/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go new file mode 100644 index 000000000..9ef6ec8d2 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go @@ -0,0 +1,366 @@ +// Copyright 2012 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package truetype + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "testing" +) + +func parseTestdataFont(name string) (font *Font, testdataIsOptional bool, err error) { + b, err := ioutil.ReadFile(fmt.Sprintf("../../testdata/%s.ttf", name)) + if err != nil { + // The "x-foo" fonts are optional tests, as they are not checked + // in for copyright or file size reasons. + return nil, strings.HasPrefix(name, "x-"), fmt.Errorf("%s: ReadFile: %v", name, err) + } + font, err = Parse(b) + if err != nil { + return nil, true, fmt.Errorf("%s: Parse: %v", name, err) + } + return font, false, nil +} + +// TestParse tests that the luxisr.ttf metrics and glyphs are parsed correctly. +// The numerical values can be manually verified by examining luxisr.ttx. 
+func TestParse(t *testing.T) { + font, _, err := parseTestdataFont("luxisr") + if err != nil { + t.Fatal(err) + } + if got, want := font.FUnitsPerEm(), int32(2048); got != want { + t.Errorf("FUnitsPerEm: got %v, want %v", got, want) + } + fupe := font.FUnitsPerEm() + if got, want := font.Bounds(fupe), (Bounds{-441, -432, 2024, 2033}); got != want { + t.Errorf("Bounds: got %v, want %v", got, want) + } + + i0 := font.Index('A') + i1 := font.Index('V') + if i0 != 36 || i1 != 57 { + t.Fatalf("Index: i0, i1 = %d, %d, want 36, 57", i0, i1) + } + if got, want := font.HMetric(fupe, i0), (HMetric{1366, 19}); got != want { + t.Errorf("HMetric: got %v, want %v", got, want) + } + if got, want := font.VMetric(fupe, i0), (VMetric{2465, 553}); got != want { + t.Errorf("VMetric: got %v, want %v", got, want) + } + if got, want := font.Kerning(fupe, i0, i1), int32(-144); got != want { + t.Errorf("Kerning: got %v, want %v", got, want) + } + + g := NewGlyphBuf() + err = g.Load(font, fupe, i0, NoHinting) + if err != nil { + t.Fatalf("Load: %v", err) + } + g0 := &GlyphBuf{ + B: g.B, + Point: g.Point, + End: g.End, + } + g1 := &GlyphBuf{ + B: Bounds{19, 0, 1342, 1480}, + Point: []Point{ + {19, 0, 51}, + {581, 1480, 1}, + {789, 1480, 51}, + {1342, 0, 1}, + {1116, 0, 35}, + {962, 410, 3}, + {368, 410, 33}, + {214, 0, 3}, + {428, 566, 19}, + {904, 566, 33}, + {667, 1200, 3}, + }, + End: []int{8, 11}, + } + if got, want := fmt.Sprint(g0), fmt.Sprint(g1); got != want { + t.Errorf("GlyphBuf:\ngot %v\nwant %v", got, want) + } +} + +func TestIndex(t *testing.T) { + testCases := map[string]map[rune]Index{ + "luxisr": { + ' ': 3, + '!': 4, + 'A': 36, + 'V': 57, + 'É': 101, + 'fl': 193, + '\u22c5': 385, + '中': 0, + }, + + // The x-etc test cases use those versions of the .ttf files provided + // by Ubuntu 14.04. See testdata/make-other-hinting-txts.sh for details. 
+ + "x-arial-bold": { + ' ': 3, + '+': 14, + '0': 19, + '_': 66, + 'w': 90, + '~': 97, + 'Ä': 98, + 'fl': 192, + '½': 242, + 'σ': 305, + 'λ': 540, + 'ỹ': 1275, + '\u04e9': 1319, + '中': 0, + }, + "x-deja-vu-sans-oblique": { + ' ': 3, + '*': 13, + 'Œ': 276, + 'ω': 861, + '‡': 2571, + '⊕': 3110, + 'fl': 4728, + '\ufb03': 4729, + '\ufffd': 4813, + // TODO: '\U0001f640': ???, + '中': 0, + }, + "x-droid-sans-japanese": { + ' ': 0, + '\u3000': 3, + '\u3041': 25, + '\u30fe': 201, + '\uff61': 202, + '\uff67': 208, + '\uff9e': 263, + '\uff9f': 264, + '\u4e00': 265, + '\u557e': 1000, + '\u61b6': 2024, + '\u6ede': 3177, + '\u7505': 3555, + '\u81e3': 4602, + '\u81e5': 4603, + '\u81e7': 4604, + '\u81e8': 4605, + '\u81ea': 4606, + '\u81ed': 4607, + '\u81f3': 4608, + '\u81f4': 4609, + '\u91c7': 5796, + '\u9fa0': 6620, + '\u203e': 12584, + }, + "x-times-new-roman": { + ' ': 3, + ':': 29, + 'fl': 192, + 'Ŀ': 273, + '♠': 388, + 'Ŗ': 451, + 'Σ': 520, + '\u200D': 745, + 'Ẽ': 1216, + '\u04e9': 1319, + '中': 0, + }, + } + for name, wants := range testCases { + font, testdataIsOptional, err := parseTestdataFont(name) + if err != nil { + if testdataIsOptional { + t.Log(err) + } else { + t.Fatal(err) + } + continue + } + for r, want := range wants { + if got := font.Index(r); got != want { + t.Errorf("%s: Index of %q, aka %U: got %d, want %d", name, r, r, got, want) + } + } + } +} + +type scalingTestData struct { + advanceWidth int32 + bounds Bounds + points []Point +} + +// scalingTestParse parses a line of points like +// 213 -22 -111 236 555;-22 -111 1, 178 555 1, 236 555 1, 36 -111 1 +// The line will not have a trailing "\n". 
+func scalingTestParse(line string) (ret scalingTestData) { + next := func(s string) (string, int32) { + t, i := "", strings.Index(s, " ") + if i != -1 { + s, t = s[:i], s[i+1:] + } + x, _ := strconv.Atoi(s) + return t, int32(x) + } + + i := strings.Index(line, ";") + prefix, line := line[:i], line[i+1:] + + prefix, ret.advanceWidth = next(prefix) + prefix, ret.bounds.XMin = next(prefix) + prefix, ret.bounds.YMin = next(prefix) + prefix, ret.bounds.XMax = next(prefix) + prefix, ret.bounds.YMax = next(prefix) + + ret.points = make([]Point, 0, 1+strings.Count(line, ",")) + for len(line) > 0 { + s := line + if i := strings.Index(line, ","); i != -1 { + s, line = line[:i], line[i+1:] + for len(line) > 0 && line[0] == ' ' { + line = line[1:] + } + } else { + line = "" + } + s, x := next(s) + s, y := next(s) + s, f := next(s) + ret.points = append(ret.points, Point{X: x, Y: y, Flags: uint32(f)}) + } + return ret +} + +// scalingTestEquals is equivalent to, but faster than, calling +// reflect.DeepEquals(a, b), and also returns the index of the first non-equal +// element. It also treats a nil []Point and an empty non-nil []Point as equal. +// a and b must have equal length. 
+func scalingTestEquals(a, b []Point) (index int, equals bool) { + for i, p := range a { + if p != b[i] { + return i, false + } + } + return 0, true +} + +var scalingTestCases = []struct { + name string + size int32 +}{ + {"luxisr", 12}, + {"x-arial-bold", 11}, + {"x-deja-vu-sans-oblique", 17}, + {"x-droid-sans-japanese", 9}, + {"x-times-new-roman", 13}, +} + +func testScaling(t *testing.T, h Hinting) { + for _, tc := range scalingTestCases { + font, testdataIsOptional, err := parseTestdataFont(tc.name) + if err != nil { + if testdataIsOptional { + t.Log(err) + } else { + t.Error(err) + } + continue + } + hintingStr := "sans" + if h != NoHinting { + hintingStr = "with" + } + f, err := os.Open(fmt.Sprintf( + "../../testdata/%s-%dpt-%s-hinting.txt", tc.name, tc.size, hintingStr)) + if err != nil { + t.Errorf("%s: Open: %v", tc.name, err) + continue + } + defer f.Close() + + wants := []scalingTestData{} + scanner := bufio.NewScanner(f) + if scanner.Scan() { + major, minor, patch := 0, 0, 0 + _, err := fmt.Sscanf(scanner.Text(), "freetype version %d.%d.%d", &major, &minor, &patch) + if err != nil { + t.Errorf("%s: version information: %v", tc.name, err) + } + if (major < 2) || (major == 2 && minor < 5) || (major == 2 && minor == 5 && patch < 1) { + t.Errorf("%s: need freetype version >= 2.5.1.\n"+ + "Try setting LD_LIBRARY_PATH=/path/to/freetype_built_from_src/objs/.libs/\n"+ + "and re-running testdata/make-other-hinting-txts.sh", + tc.name) + continue + } + } else { + t.Errorf("%s: no version information", tc.name) + continue + } + for scanner.Scan() { + wants = append(wants, scalingTestParse(scanner.Text())) + } + if err := scanner.Err(); err != nil && err != io.EOF { + t.Errorf("%s: Scanner: %v", tc.name, err) + continue + } + + glyphBuf := NewGlyphBuf() + for i, want := range wants { + if err = glyphBuf.Load(font, tc.size*64, Index(i), h); err != nil { + t.Errorf("%s: glyph #%d: Load: %v", tc.name, i, err) + continue + } + got := scalingTestData{ + advanceWidth: 
glyphBuf.AdvanceWidth, + bounds: glyphBuf.B, + points: glyphBuf.Point, + } + + if got.advanceWidth != want.advanceWidth { + t.Errorf("%s: glyph #%d advance width:\ngot %v\nwant %v", + tc.name, i, got.advanceWidth, want.advanceWidth) + continue + } + + if got.bounds != want.bounds { + t.Errorf("%s: glyph #%d bounds:\ngot %v\nwant %v", + tc.name, i, got.bounds, want.bounds) + continue + } + + for i := range got.points { + got.points[i].Flags &= 0x01 + } + if len(got.points) != len(want.points) { + t.Errorf("%s: glyph #%d:\ngot %v\nwant %v\ndifferent slice lengths: %d versus %d", + tc.name, i, got.points, want.points, len(got.points), len(want.points)) + continue + } + if j, equals := scalingTestEquals(got.points, want.points); !equals { + t.Errorf("%s: glyph #%d:\ngot %v\nwant %v\nat index %d: %v versus %v", + tc.name, i, got.points, want.points, j, got.points[j], want.points[j]) + continue + } + } + } +} + +func TestScalingSansHinting(t *testing.T) { + testScaling(t, NoHinting) +} + +func TestScalingWithHinting(t *testing.T) { + testScaling(t, FullHinting) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go new file mode 100644 index 000000000..50a0f2d09 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. 
+// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go new file mode 100644 index 000000000..d8bd013e6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. 
+// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go new file mode 100644 index 000000000..cdd4192fd --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 bytes in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. 
+// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go new file mode 100644 index 000000000..760580a50 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go @@ -0,0 +1,30 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "errors" + +func (u UUID) MarshalJSON() ([]byte, error) { + if len(u) == 0 { + return []byte(`""`), nil + } + return []byte(`"` + u.String() + `"`), nil +} + +func (u *UUID) UnmarshalJSON(data []byte) error { + if len(data) == 0 || string(data) == `""` { + return nil + } + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("invalid UUID format") + } + data = data[1 : len(data)-1] + uu := Parse(string(data)) + if uu == nil { + return errors.New("invalid UUID format") + } + *u = uu + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go new file mode 100644 index 000000000..b5eae0924 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go @@ -0,0 +1,32 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/json" + "reflect" + "testing" +) + +var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + +func TestJSON(t *testing.T) { + type S struct { + ID1 UUID + ID2 UUID + } + s1 := S{ID1: testUUID} + data, err := json.Marshal(&s1) + if err != nil { + t.Fatal(err) + } + var s2 S + if err := json.Unmarshal(data, &s2); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&s1, &s2) { + t.Errorf("got %#v, want %#v", s2, s1) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go new file mode 100644 index 000000000..dd0a8ac18 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "net" + +var ( + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
+func SetNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + if nodeID == nil { + SetNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go new file mode 100644 index 000000000..3b3d1430d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go @@ -0,0 +1,66 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "flag" + "runtime" + "testing" + "time" +) + +// This test is only run when --regressions is passed on the go test line. +var regressions = flag.Bool("regressions", false, "run uuid regression tests") + +// TestClockSeqRace tests for a particular race condition of returning two +// identical Version1 UUIDs. The duration of 1 minute was chosen as the race +// condition, before being fixed, nearly always occurred in under 30 seconds. +func TestClockSeqRace(t *testing.T) { + if !*regressions { + t.Skip("skipping regression tests") + } + duration := time.Minute + + done := make(chan struct{}) + defer close(done) + + ch := make(chan UUID, 10000) + ncpu := runtime.NumCPU() + switch ncpu { + case 0, 1: + // We can't run the test effectively. + t.Skip("skipping race test, only one CPU detected") + return + default: + runtime.GOMAXPROCS(ncpu) + } + for i := 0; i < ncpu; i++ { + go func() { + for { + select { + case <-done: + return + case ch <- NewUUID(): + } + } + }() + } + + uuids := make(map[string]bool) + cnt := 0 + start := time.Now() + for u := range ch { + s := u.String() + if uuids[s] { + t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s) + return + } + uuids[s] = true + if time.Since(start) > duration { + return + } + cnt++ + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go new file mode 100644 index 000000000..7ebc9bef1 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + mu sync.Mutex + lasttime uint64 // last time we returned + clock_seq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer mu.Unlock() + mu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clock_seq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clock_seq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. 
Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { + defer mu.Unlock() + mu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clock_seq == 0 { + setClockSequence(-1) + } + return int(clock_seq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer mu.Unlock() + mu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clock_seq + clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clock_seq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. +func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. 
+func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go new file mode 100644 index 000000000..de40b102c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = []byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. +func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go new file mode 100644 index 000000000..2920fae63 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go @@ -0,0 +1,163 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version byte + +// A Variant represents a UUIDs variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } + panic("unreachable") +} + +// Version returns the verison of uuid. It returns false if uuid is not +// valid. 
+func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implents io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go new file mode 100644 index 000000000..417ebeb26 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go @@ -0,0 +1,390 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" +) + +type test struct { + in string + version Version + variant Variant + isuuid bool +} + +var tests = []test{ + {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true}, + {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true}, + {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true}, + {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true}, + {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true}, + {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true}, + {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true}, + {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true}, + {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true}, + {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true}, + {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true}, + {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true}, + {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true}, + {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true}, + + {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true}, + 
{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true}, + {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true}, + + {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false}, + {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false}, +} + +var constants = []struct { + c interface{} + name string +}{ + {Person, "Person"}, + {Group, "Group"}, + {Org, "Org"}, + {Invalid, "Invalid"}, + {RFC4122, "RFC4122"}, + {Reserved, "Reserved"}, + {Microsoft, "Microsoft"}, + {Future, "Future"}, + {Domain(17), "Domain17"}, + {Variant(42), "BadVariant42"}, +} + +func testTest(t *testing.T, in string, tt test) { + uuid := Parse(in) + if ok := (uuid != nil); ok != tt.isuuid { + t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid) + } + if uuid == nil { + return + } + + if v := uuid.Variant(); v != tt.variant { + t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant) + } + if v, _ := uuid.Version(); v != tt.version { + t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version) + } +} + +func TestUUID(t *testing.T) { + for _, tt := range tests { + testTest(t, tt.in, tt) + testTest(t, strings.ToUpper(tt.in), tt) + } +} + +func TestConstants(t *testing.T) { + for x, tt := range constants { + v, ok := tt.c.(fmt.Stringer) + if !ok { + t.Errorf("%x: %v: not a stringer", x, v) + } else if s := v.String(); s != tt.name { + v, _ := tt.c.(int) + t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name) + } + } +} + +func TestRandomUUID(t 
*testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + uuid := NewRandom() + s := uuid.String() + if m[s] { + t.Errorf("NewRandom returned duplicated UUID %s\n", s) + } + m[s] = true + if v, _ := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s\n", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + } + } +} + +func TestNew(t *testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + s := New() + if m[s] { + t.Errorf("New returned duplicated UUID %s\n", s) + } + m[s] = true + uuid := Parse(s) + if uuid == nil { + t.Errorf("New returned %q which does not decode\n", s) + continue + } + if v, _ := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s\n", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + } + } +} + +func clockSeq(t *testing.T, uuid UUID) int { + seq, ok := uuid.ClockSequence() + if !ok { + t.Fatalf("%s: invalid clock sequence\n", uuid) + } + return seq +} + +func TestClockSeq(t *testing.T) { + // Fake time.Now for this test to return a monotonically advancing time; restore it at end. + defer func(orig func() time.Time) { timeNow = orig }(timeNow) + monTime := time.Now() + timeNow = func() time.Time { + monTime = monTime.Add(1 * time.Second) + return monTime + } + + SetClockSequence(-1) + uuid1 := NewUUID() + uuid2 := NewUUID() + + if clockSeq(t, uuid1) != clockSeq(t, uuid2) { + t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2)) + } + + SetClockSequence(-1) + uuid2 = NewUUID() + + // Just on the very off chance we generated the same sequence + // two times we try again. 
+ if clockSeq(t, uuid1) == clockSeq(t, uuid2) { + SetClockSequence(-1) + uuid2 = NewUUID() + } + if clockSeq(t, uuid1) == clockSeq(t, uuid2) { + t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1)) + } + + SetClockSequence(0x1234) + uuid1 = NewUUID() + if seq := clockSeq(t, uuid1); seq != 0x1234 { + t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq) + } +} + +func TestCoding(t *testing.T) { + text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" + urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" + data := UUID{ + 0x7d, 0x44, 0x48, 0x40, + 0x9d, 0xc0, + 0x11, 0xd1, + 0xb2, 0x45, + 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, + } + if v := data.String(); v != text { + t.Errorf("%x: encoded to %s, expected %s\n", data, v, text) + } + if v := data.URN(); v != urn { + t.Errorf("%x: urn is %s, expected %s\n", data, v, urn) + } + + uuid := Parse(text) + if !Equal(uuid, data) { + t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data) + } +} + +func TestVersion1(t *testing.T) { + uuid1 := NewUUID() + uuid2 := NewUUID() + + if Equal(uuid1, uuid2) { + t.Errorf("%s:duplicate uuid\n", uuid1) + } + if v, _ := uuid1.Version(); v != 1 { + t.Errorf("%s: version %s expected 1\n", uuid1, v) + } + if v, _ := uuid2.Version(); v != 1 { + t.Errorf("%s: version %s expected 1\n", uuid2, v) + } + n1 := uuid1.NodeID() + n2 := uuid2.NodeID() + if !bytes.Equal(n1, n2) { + t.Errorf("Different nodes %x != %x\n", n1, n2) + } + t1, ok := uuid1.Time() + if !ok { + t.Errorf("%s: invalid time\n", uuid1) + } + t2, ok := uuid2.Time() + if !ok { + t.Errorf("%s: invalid time\n", uuid2) + } + q1, ok := uuid1.ClockSequence() + if !ok { + t.Errorf("%s: invalid clock sequence\n", uuid1) + } + q2, ok := uuid2.ClockSequence() + if !ok { + t.Errorf("%s: invalid clock sequence", uuid2) + } + + switch { + case t1 == t2 && q1 == q2: + t.Errorf("time stopped\n") + case t1 > t2 && q1 == q2: + t.Errorf("time reversed\n") + case t1 < t2 && q1 != q2: + t.Errorf("clock sequence chaned unexpectedly\n") + 
} +} + +func TestNodeAndTime(t *testing.T) { + // Time is February 5, 1998 12:30:23.136364800 AM GMT + + uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") + node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} + + ts, ok := uuid.Time() + if ok { + c := time.Unix(ts.UnixTime()) + want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) + if !c.Equal(want) { + t.Errorf("Got time %v, want %v", c, want) + } + } else { + t.Errorf("%s: bad time\n", uuid) + } + if !bytes.Equal(node, uuid.NodeID()) { + t.Errorf("Expected node %v got %v\n", node, uuid.NodeID()) + } +} + +func TestMD5(t *testing.T) { + uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String() + want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" + if uuid != want { + t.Errorf("MD5: got %q expected %q\n", uuid, want) + } +} + +func TestSHA1(t *testing.T) { + uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String() + want := "886313e1-3b8a-5372-9b90-0c9aee199e5d" + if uuid != want { + t.Errorf("SHA1: got %q expected %q\n", uuid, want) + } +} + +func TestNodeID(t *testing.T) { + nid := []byte{1, 2, 3, 4, 5, 6} + SetNodeInterface("") + s := NodeInterface() + if s == "" || s == "user" { + t.Errorf("NodeInterface %q after SetInteface\n", s) + } + node1 := NodeID() + if node1 == nil { + t.Errorf("NodeID nil after SetNodeInterface\n", s) + } + SetNodeID(nid) + s = NodeInterface() + if s != "user" { + t.Errorf("Expected NodeInterface %q got %q\n", "user", s) + } + node2 := NodeID() + if node2 == nil { + t.Errorf("NodeID nil after SetNodeID\n", s) + } + if bytes.Equal(node1, node2) { + t.Errorf("NodeID not changed after SetNodeID\n", s) + } else if !bytes.Equal(nid, node2) { + t.Errorf("NodeID is %x, expected %x\n", node2, nid) + } +} + +func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) { + if uuid == nil { + t.Errorf("%s failed\n", name) + return + } + if v, _ := uuid.Version(); v != 2 { + t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v) + return + } + if v, ok := 
uuid.Domain(); !ok || v != domain { + if !ok { + t.Errorf("%s: %d: Domain failed\n", name, uuid) + } else { + t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v) + } + } + if v, ok := uuid.Id(); !ok || v != id { + if !ok { + t.Errorf("%s: %d: Id failed\n", name, uuid) + } else { + t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v) + } + } +} + +func TestDCE(t *testing.T) { + testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678) + testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid())) + testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid())) +} + +type badRand struct{} + +func (r badRand) Read(buf []byte) (int, error) { + for i, _ := range buf { + buf[i] = byte(i) + } + return len(buf), nil +} + +func TestBadRand(t *testing.T) { + SetRand(badRand{}) + uuid1 := New() + uuid2 := New() + if uuid1 != uuid2 { + t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2) + } + SetRand(nil) + uuid1 = New() + uuid2 = New() + if uuid1 == uuid2 { + t.Errorf("unexecpted duplicates, got %q\n", uuid1) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go new file mode 100644 index 000000000..0127eacfa --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go @@ -0,0 +1,41 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. 
If GetTime fails to +// return the current NewUUID returns nil. +func NewUUID() UUID { + if nodeID == nil { + SetNodeInterface("") + } + + now, seq, err := GetTime() + if err != nil { + return nil + } + + uuid := make([]byte, 16) + + time_low := uint32(now & 0xffffffff) + time_mid := uint16((now >> 32) & 0xffff) + time_hi := uint16((now >> 48) & 0x0fff) + time_hi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], time_low) + binary.BigEndian.PutUint16(uuid[4:], time_mid) + binary.BigEndian.PutUint16(uuid[6:], time_hi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID) + + return uuid +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go new file mode 100644 index 000000000..b3d4a368d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go @@ -0,0 +1,25 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +// Random returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. 
+func NewRandom() UUID { + uuid := make([]byte, 16) + randomBits([]byte(uuid)) + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/base64.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/base64.go new file mode 100644 index 000000000..fc3116090 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt.go new file mode 100644 index 000000000..b8e18d744 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. 
See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "golang.org/x/crypto/blowfish" + "io" + "strconv" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. 
+type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. 
Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in 
expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. 
+ ckey := append(key, 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n += 1 + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n += 1 + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go new file mode 100644 index 000000000..f08a6f5b2 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go @@ -0,0 +1,226 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import ( + "bytes" + "fmt" + "testing" +) + +func TestBcryptingIsEasy(t *testing.T) { + pass := []byte("mypassword") + hp, err := GenerateFromPassword(pass, 0) + if err != nil { + t.Fatalf("GenerateFromPassword error: %s", err) + } + + if CompareHashAndPassword(hp, pass) != nil { + t.Errorf("%v should hash %s correctly", hp, pass) + } + + notPass := "notthepass" + err = CompareHashAndPassword(hp, []byte(notPass)) + if err != ErrMismatchedHashAndPassword { + t.Errorf("%v and %s should be mismatched", hp, notPass) + } +} + +func TestBcryptingIsCorrect(t *testing.T) { + pass := []byte("allmine") + salt := []byte("XajjQvNhvvRt5GSeFk1xFe") + expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga") + + hash, err := bcrypt(pass, 10, salt) + if err != nil { + t.Fatalf("bcrypt blew up: %v", err) + } + if !bytes.HasSuffix(expectedHash, hash) { + t.Errorf("%v should be the suffix of %v", hash, expectedHash) + } + + h, err := newFromHash(expectedHash) + if err != nil { + t.Errorf("Unable to parse %s: %v", string(expectedHash), err) + } + + // This is not the safe way to compare these hashes. We do this only for + // testing clarity. 
Use bcrypt.CompareHashAndPassword() + if err == nil && !bytes.Equal(expectedHash, h.Hash()) { + t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash) + } +} + +func TestVeryShortPasswords(t *testing.T) { + key := []byte("k") + salt := []byte("XajjQvNhvvRt5GSeFk1xFe") + _, err := bcrypt(key, 10, salt) + if err != nil { + t.Errorf("One byte key resulted in error: %s", err) + } +} + +func TestTooLongPasswordsWork(t *testing.T) { + salt := []byte("XajjQvNhvvRt5GSeFk1xFe") + // One byte over the usual 56 byte limit that blowfish has + tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456") + tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C") + hash, err := bcrypt(tooLongPass, 10, salt) + if err != nil { + t.Fatalf("bcrypt blew up on long password: %v", err) + } + if !bytes.HasSuffix(tooLongExpected, hash) { + t.Errorf("%v should be the suffix of %v", hash, tooLongExpected) + } +} + +type InvalidHashTest struct { + err error + hash []byte +} + +var invalidTests = []InvalidHashTest{ + {ErrHashTooShort, []byte("$2a$10$fooo")}, + {ErrHashTooShort, []byte("$2a")}, + {HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, + {InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, + {InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, +} + +func TestInvalidHashErrors(t *testing.T) { + check := func(name string, expected, err error) { + if err == nil { + t.Errorf("%s: Should have returned an error", name) + } + if err != nil && err != expected { + t.Errorf("%s gave err %v but should have given %v", name, err, expected) + } + } + for _, iht := range invalidTests { + _, err := newFromHash(iht.hash) + check("newFromHash", iht.err, err) + err = CompareHashAndPassword(iht.hash, []byte("anything")) + check("CompareHashAndPassword", iht.err, err) + } +} + +func 
TestUnpaddedBase64Encoding(t *testing.T) { + original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30} + encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe") + + encoded := base64Encode(original) + + if !bytes.Equal(encodedOriginal, encoded) { + t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal) + } + + decoded, err := base64Decode(encodedOriginal) + if err != nil { + t.Fatalf("base64Decode blew up: %s", err) + } + + if !bytes.Equal(decoded, original) { + t.Errorf("Decoded %v should have equaled %v", decoded, original) + } +} + +func TestCost(t *testing.T) { + suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C" + for _, vers := range []string{"2a", "2"} { + for _, cost := range []int{4, 10} { + s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix) + h := []byte(s) + actual, err := Cost(h) + if err != nil { + t.Errorf("Cost, error: %s", err) + continue + } + if actual != cost { + t.Errorf("Cost, expected: %d, actual: %d", cost, actual) + } + } + } + _, err := Cost([]byte("$a$a$" + suffix)) + if err == nil { + t.Errorf("Cost, malformed but no error returned") + } +} + +func TestCostValidationInHash(t *testing.T) { + if testing.Short() { + return + } + + pass := []byte("mypassword") + + for c := 0; c < MinCost; c++ { + p, _ := newFromPassword(pass, c) + if p.cost != DefaultCost { + t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost) + } + } + + p, _ := newFromPassword(pass, 14) + if p.cost != 14 { + t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost) + } + + hp, _ := newFromHash(p.Hash()) + if p.cost != hp.cost { + t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost) + } + + _, err := newFromPassword(pass, 32) + if err == nil { + t.Fatalf("newFromPassword: should return a cost error") + } + if err != InvalidCostError(32) { + t.Errorf("newFromPassword: should return cost error, got %#v", err) + } 
+} + +func TestCostReturnsWithLeadingZeroes(t *testing.T) { + hp, _ := newFromPassword([]byte("abcdefgh"), 7) + cost := hp.Hash()[4:7] + expected := []byte("07$") + + if !bytes.Equal(expected, cost) { + t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected) + } +} + +func TestMinorNotRequired(t *testing.T) { + noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga") + h, err := newFromHash(noMinorHash) + if err != nil { + t.Fatalf("No minor hash blew up: %s", err) + } + if h.minor != 0 { + t.Errorf("Should leave minor version at 0, but was %d", h.minor) + } + + if !bytes.Equal(noMinorHash, h.Hash()) { + t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash()) + } +} + +func BenchmarkEqual(b *testing.B) { + b.StopTimer() + passwd := []byte("somepasswordyoulike") + hash, _ := GenerateFromPassword(passwd, 10) + b.StartTimer() + for i := 0; i < b.N; i++ { + CompareHashAndPassword(hash, passwd) + } +} + +func BenchmarkGeneration(b *testing.B) { + b.StopTimer() + passwd := []byte("mylongpassword1234") + b.StartTimer() + for i := 0; i < b.N; i++ { + GenerateFromPassword(passwd, 10) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/.hgtags b/Godeps/_workspace/src/code.google.com/p/log4go/.hgtags new file mode 100644 index 000000000..72a2eea2c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/.hgtags @@ -0,0 +1,4 @@ +4fbe6aadba231e838a449d340e43bdaab0bf85bd go.weekly.2012-02-07 +56168fd53249d639c25c74ced881fffb20d27be9 go.weekly.2012-02-22 +56168fd53249d639c25c74ced881fffb20d27be9 go.weekly.2012-02-22 +5c22fbd77d91f54d76cdbdee05318699754c44cc go.weekly.2012-02-22 diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/LICENSE b/Godeps/_workspace/src/code.google.com/p/log4go/LICENSE new file mode 100644 index 000000000..7093402bf --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2010, Kyle 
Lemons . All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/README b/Godeps/_workspace/src/code.google.com/p/log4go/README new file mode 100644 index 000000000..16d80ecb7 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/README @@ -0,0 +1,12 @@ +Please see http://log4go.googlecode.com/ + +Installation: +- Run `goinstall log4go.googlecode.com/hg` + +Usage: +- Add the following import: +import l4g "log4go.googlecode.com/hg" + +Acknowledgements: +- pomack + For providing awesome patches to bring log4go up to the latest Go spec diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/config.go b/Godeps/_workspace/src/code.google.com/p/log4go/config.go new file mode 100644 index 000000000..f048b69f5 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/config.go @@ -0,0 +1,288 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +package log4go + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +type xmlProperty struct { + Name string `xml:"name,attr"` + Value string `xml:",chardata"` +} + +type xmlFilter struct { + Enabled string `xml:"enabled,attr"` + Tag string `xml:"tag"` + Level string `xml:"level"` + Type string `xml:"type"` + Property []xmlProperty `xml:"property"` +} + +type xmlLoggerConfig struct { + Filter []xmlFilter `xml:"filter"` +} + +// Load XML configuration; see examples/example.xml for documentation +func (log Logger) LoadConfiguration(filename string) { + log.Close() + + // Open the configuration file + fd, err := os.Open(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not open %q for reading: %s\n", filename, err) + os.Exit(1) + } + + contents, err := ioutil.ReadAll(fd) + if err != nil { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not read %q: %s\n", filename, err) + os.Exit(1) + } + + xc := new(xmlLoggerConfig) + if err := xml.Unmarshal(contents, xc); err != nil { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: 
Could not parse XML configuration in %q: %s\n", filename, err) + os.Exit(1) + } + + for _, xmlfilt := range xc.Filter { + var filt LogWriter + var lvl level + bad, good, enabled := false, true, false + + // Check required children + if len(xmlfilt.Enabled) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required attribute %s for filter missing in %s\n", "enabled", filename) + bad = true + } else { + enabled = xmlfilt.Enabled != "false" + } + if len(xmlfilt.Tag) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "tag", filename) + bad = true + } + if len(xmlfilt.Type) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "type", filename) + bad = true + } + if len(xmlfilt.Level) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "level", filename) + bad = true + } + + switch xmlfilt.Level { + case "FINEST": + lvl = FINEST + case "FINE": + lvl = FINE + case "DEBUG": + lvl = DEBUG + case "TRACE": + lvl = TRACE + case "INFO": + lvl = INFO + case "WARNING": + lvl = WARNING + case "ERROR": + lvl = ERROR + case "CRITICAL": + lvl = CRITICAL + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter has unknown value in %s: %s\n", "level", filename, xmlfilt.Level) + bad = true + } + + // Just so all of the required attributes are errored at the same time if missing + if bad { + os.Exit(1) + } + + switch xmlfilt.Type { + case "console": + filt, good = xmlToConsoleLogWriter(filename, xmlfilt.Property, enabled) + case "file": + filt, good = xmlToFileLogWriter(filename, xmlfilt.Property, enabled) + case "xml": + filt, good = xmlToXMLLogWriter(filename, xmlfilt.Property, enabled) + case "socket": + filt, good = xmlToSocketLogWriter(filename, xmlfilt.Property, enabled) + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not load XML configuration in %s: 
unknown filter type \"%s\"\n", filename, xmlfilt.Type) + os.Exit(1) + } + + // Just so all of the required params are errored at the same time if wrong + if !good { + os.Exit(1) + } + + // If we're disabled (syntax and correctness checks only), don't add to logger + if !enabled { + continue + } + + log[xmlfilt.Tag] = &Filter{lvl, filt} + } +} + +func xmlToConsoleLogWriter(filename string, props []xmlProperty, enabled bool) (ConsoleLogWriter, bool) { + // Parse properties + for _, prop := range props { + switch prop.Name { + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for console filter in %s\n", prop.Name, filename) + } + } + + // If it's disabled, we're just checking syntax + if !enabled { + return nil, true + } + + return NewConsoleLogWriter(), true +} + +// Parse a number with K/M/G suffixes based on thousands (1000) or 2^10 (1024) +func strToNumSuffix(str string, mult int) int { + num := 1 + if len(str) > 1 { + switch str[len(str)-1] { + case 'G', 'g': + num *= mult + fallthrough + case 'M', 'm': + num *= mult + fallthrough + case 'K', 'k': + num *= mult + str = str[0 : len(str)-1] + } + } + parsed, _ := strconv.Atoi(str) + return parsed * num +} +func xmlToFileLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { + file := "" + format := "[%D %T] [%L] (%S) %M" + maxlines := 0 + maxsize := 0 + daily := false + rotate := false + + // Parse properties + for _, prop := range props { + switch prop.Name { + case "filename": + file = strings.Trim(prop.Value, " \r\n") + case "format": + format = strings.Trim(prop.Value, " \r\n") + case "maxlines": + maxlines = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) + case "maxsize": + maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) + case "daily": + daily = strings.Trim(prop.Value, " \r\n") != "false" + case "rotate": + rotate = strings.Trim(prop.Value, " \r\n") != "false" + default: + fmt.Fprintf(os.Stderr, 
"LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) + } + } + + // Check properties + if len(file) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "filename", filename) + return nil, false + } + + // If it's disabled, we're just checking syntax + if !enabled { + return nil, true + } + + flw := NewFileLogWriter(file, rotate) + flw.SetFormat(format) + flw.SetRotateLines(maxlines) + flw.SetRotateSize(maxsize) + flw.SetRotateDaily(daily) + return flw, true +} + +func xmlToXMLLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { + file := "" + maxrecords := 0 + maxsize := 0 + daily := false + rotate := false + + // Parse properties + for _, prop := range props { + switch prop.Name { + case "filename": + file = strings.Trim(prop.Value, " \r\n") + case "maxrecords": + maxrecords = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) + case "maxsize": + maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) + case "daily": + daily = strings.Trim(prop.Value, " \r\n") != "false" + case "rotate": + rotate = strings.Trim(prop.Value, " \r\n") != "false" + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for xml filter in %s\n", prop.Name, filename) + } + } + + // Check properties + if len(file) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for xml filter missing in %s\n", "filename", filename) + return nil, false + } + + // If it's disabled, we're just checking syntax + if !enabled { + return nil, true + } + + xlw := NewXMLLogWriter(file, rotate) + xlw.SetRotateLines(maxrecords) + xlw.SetRotateSize(maxsize) + xlw.SetRotateDaily(daily) + return xlw, true +} + +func xmlToSocketLogWriter(filename string, props []xmlProperty, enabled bool) (SocketLogWriter, bool) { + endpoint := "" + protocol := "udp" + + // Parse properties + for _, prop := 
range props { + switch prop.Name { + case "endpoint": + endpoint = strings.Trim(prop.Value, " \r\n") + case "protocol": + protocol = strings.Trim(prop.Value, " \r\n") + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) + } + } + + // Check properties + if len(endpoint) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "endpoint", filename) + return nil, false + } + + // If it's disabled, we're just checking syntax + if !enabled { + return nil, true + } + + return NewSocketLogWriter(protocol, endpoint), true +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go b/Godeps/_workspace/src/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go new file mode 100644 index 000000000..394ca8380 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go @@ -0,0 +1,13 @@ +package main + +import ( + "time" +) + +import l4g "code.google.com/p/log4go" + +func main() { + log := l4g.NewLogger() + log.AddFilter("stdout", l4g.DEBUG, l4g.NewConsoleLogWriter()) + log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/examples/FileLogWriter_Manual.go b/Godeps/_workspace/src/code.google.com/p/log4go/examples/FileLogWriter_Manual.go new file mode 100644 index 000000000..efd596aa6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/examples/FileLogWriter_Manual.go @@ -0,0 +1,57 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "time" +) + +import l4g "code.google.com/p/log4go" + +const ( + filename = "flw.log" +) + +func main() { + // Get a new logger instance + log := l4g.NewLogger() + + // Create a default logger that is logging messages of FINE or higher + log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter(filename, false)) + 
log.Close() + + /* Can also specify manually via the following: (these are the defaults) */ + flw := l4g.NewFileLogWriter(filename, false) + flw.SetFormat("[%D %T] [%L] (%S) %M") + flw.SetRotate(false) + flw.SetRotateSize(0) + flw.SetRotateLines(0) + flw.SetRotateDaily(false) + log.AddFilter("file", l4g.FINE, flw) + + // Log some experimental messages + log.Finest("Everything is created now (notice that I will not be printing to the file)") + log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) + log.Critical("Time to close out!") + + // Close the log + log.Close() + + // Print what was logged to the file (yes, I know I'm skipping error checking) + fd, _ := os.Open(filename) + in := bufio.NewReader(fd) + fmt.Print("Messages logged to file were: (line numbers not included)\n") + for lineno := 1; ; lineno++ { + line, err := in.ReadString('\n') + if err == io.EOF { + break + } + fmt.Printf("%3d:\t%s", lineno, line) + } + fd.Close() + + // Remove the file so it's not lying around + os.Remove(filename) +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/examples/SimpleNetLogServer.go b/Godeps/_workspace/src/code.google.com/p/log4go/examples/SimpleNetLogServer.go new file mode 100644 index 000000000..83c80ad12 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/examples/SimpleNetLogServer.go @@ -0,0 +1,42 @@ +package main + +import ( + "flag" + "fmt" + "net" + "os" +) + +var ( + port = flag.String("p", "12124", "Port number to listen on") +) + +func e(err error) { + if err != nil { + fmt.Printf("Erroring out: %s\n", err) + os.Exit(1) + } +} + +func main() { + flag.Parse() + + // Bind to the port + bind, err := net.ResolveUDPAddr("0.0.0.0:" + *port) + e(err) + + // Create listener + listener, err := net.ListenUDP("udp", bind) + e(err) + + fmt.Printf("Listening to port %s...\n", *port) + for { + // read into a new buffer + buffer := make([]byte, 1024) + _, _, err := listener.ReadFrom(buffer) + e(err) + + // log to standard 
output + fmt.Println(string(buffer)) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go b/Godeps/_workspace/src/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go new file mode 100644 index 000000000..400b698ca --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go @@ -0,0 +1,18 @@ +package main + +import ( + "time" +) + +import l4g "code.google.com/p/log4go" + +func main() { + log := l4g.NewLogger() + log.AddFilter("network", l4g.FINEST, l4g.NewSocketLogWriter("udp", "192.168.1.255:12124")) + + // Run `nc -u -l -p 12124` or similar before you run this to see the following message + log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) + + // This makes sure the output stream buffer is written + log.Close() +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/examples/XMLConfigurationExample.go b/Godeps/_workspace/src/code.google.com/p/log4go/examples/XMLConfigurationExample.go new file mode 100644 index 000000000..164c2add4 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/examples/XMLConfigurationExample.go @@ -0,0 +1,13 @@ +package main + +import l4g "code.google.com/p/log4go" + +func main() { + // Load the configuration (isn't this easy?) + l4g.LoadConfiguration("example.xml") + + // And now we're ready! + l4g.Finest("This will only go to those of you really cool UDP kids! If you change enabled=true.") + l4g.Debug("Oh no! 
%d + %d = %d!", 2, 2, 2+2) + l4g.Info("About that time, eh chaps?") +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/examples/example.xml b/Godeps/_workspace/src/code.google.com/p/log4go/examples/example.xml new file mode 100644 index 000000000..e791278ce --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/examples/example.xml @@ -0,0 +1,47 @@ + + + stdout + console + + DEBUG + + + file + file + FINEST + test.log + + [%D %T] [%L] (%S) %M + false + 0M + 0K + true + + + xmllog + xml + TRACE + trace.xml + true + 100M + 6K + false + + + donotopen + socket + FINEST + 192.168.1.255:12124 + udp + + diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/filelog.go b/Godeps/_workspace/src/code.google.com/p/log4go/filelog.go new file mode 100644 index 000000000..9cbd815d9 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/filelog.go @@ -0,0 +1,239 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +package log4go + +import ( + "os" + "fmt" + "time" +) + +// This log writer sends output to a file +type FileLogWriter struct { + rec chan *LogRecord + rot chan bool + + // The opened file + filename string + file *os.File + + // The logging format + format string + + // File header/trailer + header, trailer string + + // Rotate at linecount + maxlines int + maxlines_curlines int + + // Rotate at size + maxsize int + maxsize_cursize int + + // Rotate daily + daily bool + daily_opendate int + + // Keep old logfiles (.001, .002, etc) + rotate bool +} + +// This is the FileLogWriter's output method +func (w *FileLogWriter) LogWrite(rec *LogRecord) { + w.rec <- rec +} + +func (w *FileLogWriter) Close() { + close(w.rec) +} + +// NewFileLogWriter creates a new LogWriter which writes to the given file and +// has rotation enabled if rotate is true. +// +// If rotate is true, any time a new log file is opened, the old one is renamed +// with a .### extension to preserve it. 
The various Set* methods can be used +// to configure log rotation based on lines, size, and daily. +// +// The standard log-line format is: +// [%D %T] [%L] (%S) %M +func NewFileLogWriter(fname string, rotate bool) *FileLogWriter { + w := &FileLogWriter{ + rec: make(chan *LogRecord, LogBufferLength), + rot: make(chan bool), + filename: fname, + format: "[%D %T] [%L] (%S) %M", + rotate: rotate, + } + + // open the file for the first time + if err := w.intRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return nil + } + + go func() { + defer func() { + if w.file != nil { + fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) + w.file.Close() + } + }() + + for { + select { + case <-w.rot: + if err := w.intRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return + } + case rec, ok := <-w.rec: + if !ok { + return + } + now := time.Now() + if (w.maxlines > 0 && w.maxlines_curlines >= w.maxlines) || + (w.maxsize > 0 && w.maxsize_cursize >= w.maxsize) || + (w.daily && now.Day() != w.daily_opendate) { + if err := w.intRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return + } + } + + // Perform the write + n, err := fmt.Fprint(w.file, FormatLogRecord(w.format, rec)) + if err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return + } + + // Update the counts + w.maxlines_curlines++ + w.maxsize_cursize += n + } + } + }() + + return w +} + +// Request that the logs rotate +func (w *FileLogWriter) Rotate() { + w.rot <- true +} + +// If this is called in a threaded context, it MUST be synchronized +func (w *FileLogWriter) intRotate() error { + // Close any log file that may be open + if w.file != nil { + fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) + w.file.Close() + } + + // If we are keeping log files, move it to the next available number + if 
w.rotate { + _, err := os.Lstat(w.filename) + if err == nil { // file exists + // Find the next available number + num := 1 + fname := "" + for ; err == nil && num <= 999; num++ { + fname = w.filename + fmt.Sprintf(".%03d", num) + _, err = os.Lstat(fname) + } + // return error if the last file checked still existed + if err == nil { + return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.filename) + } + + // Rename the file to its newfound home + err = os.Rename(w.filename, fname) + if err != nil { + return fmt.Errorf("Rotate: %s\n", err) + } + } + } + + // Open the log file + fd, err := os.OpenFile(w.filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660) + if err != nil { + return err + } + w.file = fd + + now := time.Now() + fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: now})) + + // Set the daily open date to the current date + w.daily_opendate = now.Day() + + // initialize rotation values + w.maxlines_curlines = 0 + w.maxsize_cursize = 0 + + return nil +} + +// Set the logging format (chainable). Must be called before the first log +// message is written. +func (w *FileLogWriter) SetFormat(format string) *FileLogWriter { + w.format = format + return w +} + +// Set the logfile header and footer (chainable). Must be called before the first log +// message is written. These are formatted similar to the FormatLogRecord (e.g. +// you can use %D and %T in your header/footer for date and time). +func (w *FileLogWriter) SetHeadFoot(head, foot string) *FileLogWriter { + w.header, w.trailer = head, foot + if w.maxlines_curlines == 0 { + fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: time.Now()})) + } + return w +} + +// Set rotate at linecount (chainable). Must be called before the first log +// message is written. 
+func (w *FileLogWriter) SetRotateLines(maxlines int) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateLines: %v\n", maxlines) + w.maxlines = maxlines + return w +} + +// Set rotate at size (chainable). Must be called before the first log message +// is written. +func (w *FileLogWriter) SetRotateSize(maxsize int) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateSize: %v\n", maxsize) + w.maxsize = maxsize + return w +} + +// Set rotate daily (chainable). Must be called before the first log message is +// written. +func (w *FileLogWriter) SetRotateDaily(daily bool) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateDaily: %v\n", daily) + w.daily = daily + return w +} + +// SetRotate changes whether or not the old logs are kept. (chainable) Must be +// called before the first log message is written. If rotate is false, the +// files are overwritten; otherwise, they are rotated to another file before the +// new log is opened. +func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotate: %v\n", rotate) + w.rotate = rotate + return w +} + +// NewXMLLogWriter is a utility method for creating a FileLogWriter set up to +// output XML record log messages instead of line-based ones. +func NewXMLLogWriter(fname string, rotate bool) *FileLogWriter { + return NewFileLogWriter(fname, rotate).SetFormat( + ` + %D %T + %S + %M + `).SetHeadFoot("", "") +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/log4go.go b/Godeps/_workspace/src/code.google.com/p/log4go/log4go.go new file mode 100644 index 000000000..ab4e857f5 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/log4go.go @@ -0,0 +1,484 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +// Package log4go provides level-based and highly configurable logging. +// +// Enhanced Logging +// +// This is inspired by the logging functionality in Java. 
Essentially, you create a Logger +// object and create output filters for it. You can send whatever you want to the Logger, +// and it will filter that based on your settings and send it to the outputs. This way, you +// can put as much debug code in your program as you want, and when you're done you can filter +// out the mundane messages so only the important ones show up. +// +// Utility functions are provided to make life easier. Here is some example code to get started: +// +// log := log4go.NewLogger() +// log.AddFilter("stdout", log4go.DEBUG, log4go.NewConsoleLogWriter()) +// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true)) +// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) +// +// The first two lines can be combined with the utility NewDefaultLogger: +// +// log := log4go.NewDefaultLogger(log4go.DEBUG) +// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true)) +// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) +// +// Usage notes: +// - The ConsoleLogWriter does not display the source of the message to standard +// output, but the FileLogWriter does. +// - The utility functions (Info, Debug, Warn, etc) derive their source from the +// calling function, and this incurs extra overhead. +// +// Changes from 2.0: +// - The external interface has remained mostly stable, but a lot of the +// internals have been changed, so if you depended on any of this or created +// your own LogWriter, then you will probably have to update your code. In +// particular, Logger is now a map and ConsoleLogWriter is now a channel +// behind-the-scenes, and the LogWrite method no longer has return values. +// +// Future work: (please let me know if you think I should work on any of these particularly) +// - Log file rotation +// - Logging configuration files ala log4j +// - Have the ability to remove filters? 
+// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows +// for another method of logging +// - Add an XML filter type +package log4go + +import ( + "errors" + "os" + "fmt" + "time" + "strings" + "runtime" +) + +// Version information +const ( + L4G_VERSION = "log4go-v3.0.1" + L4G_MAJOR = 3 + L4G_MINOR = 0 + L4G_BUILD = 1 +) + +/****** Constants ******/ + +// These are the integer logging levels used by the logger +type level int + +const ( + FINEST level = iota + FINE + DEBUG + TRACE + INFO + WARNING + ERROR + CRITICAL +) + +// Logging level strings +var ( + levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"} +) + +func (l level) String() string { + if l < 0 || int(l) > len(levelStrings) { + return "UNKNOWN" + } + return levelStrings[int(l)] +} + +/****** Variables ******/ +var ( + // LogBufferLength specifies how many log messages a particular log4go + // logger can buffer at a time before writing them. + LogBufferLength = 32 +) + +/****** LogRecord ******/ + +// A LogRecord contains all of the pertinent information for each message +type LogRecord struct { + Level level // The log level + Created time.Time // The time at which the log message was created (nanoseconds) + Source string // The message source + Message string // The log message +} + +/****** LogWriter ******/ + +// This is an interface for anything that should be able to write logs +type LogWriter interface { + // This will be called to log a LogRecord message. + LogWrite(rec *LogRecord) + + // This should clean up anything lingering about the LogWriter, as it is called before + // the LogWriter is removed. LogWrite should not be called after Close. + Close() +} + +/****** Logger ******/ + +// A Filter represents the log level below which no log records are written to +// the associated LogWriter. 
+type Filter struct { + Level level + LogWriter +} + +// A Logger represents a collection of Filters through which log messages are +// written. +type Logger map[string]*Filter + +// Create a new logger. +// +// DEPRECATED: Use make(Logger) instead. +func NewLogger() Logger { + os.Stderr.WriteString("warning: use of deprecated NewLogger\n") + return make(Logger) +} + +// Create a new logger with a "stdout" filter configured to send log messages at +// or above lvl to standard output. +// +// DEPRECATED: use NewDefaultLogger instead. +func NewConsoleLogger(lvl level) Logger { + os.Stderr.WriteString("warning: use of deprecated NewConsoleLogger\n") + return Logger{ + "stdout": &Filter{lvl, NewConsoleLogWriter()}, + } +} + +// Create a new logger with a "stdout" filter configured to send log messages at +// or above lvl to standard output. +func NewDefaultLogger(lvl level) Logger { + return Logger{ + "stdout": &Filter{lvl, NewConsoleLogWriter()}, + } +} + +// Closes all log writers in preparation for exiting the program or a +// reconfiguration of logging. Calling this is not really imperative, unless +// you want to guarantee that all log messages are written. Close removes +// all filters (and thus all LogWriters) from the logger. +func (log Logger) Close() { + // Close all open loggers + for name, filt := range log { + filt.Close() + delete(log, name) + } +} + +// Add a new LogWriter to the Logger which will only log messages at lvl or +// higher. This function should not be called from multiple goroutines. +// Returns the logger for chaining. 
+func (log Logger) AddFilter(name string, lvl level, writer LogWriter) Logger { + log[name] = &Filter{lvl, writer} + return log +} + +/******* Logging *******/ +// Send a formatted log message internally +func (log Logger) intLogf(lvl level, format string, args ...interface{}) { + skip := true + + // Determine if any logging will be done + for _, filt := range log { + if lvl >= filt.Level { + skip = false + break + } + } + if skip { + return + } + + // Determine caller func + pc, _, lineno, ok := runtime.Caller(2) + src := "" + if ok { + src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) + } + + msg := format + if len(args) > 0 { + msg = fmt.Sprintf(format, args...) + } + + // Make the log record + rec := &LogRecord{ + Level: lvl, + Created: time.Now(), + Source: src, + Message: msg, + } + + // Dispatch the logs + for _, filt := range log { + if lvl < filt.Level { + continue + } + filt.LogWrite(rec) + } +} + +// Send a closure log message internally +func (log Logger) intLogc(lvl level, closure func() string) { + skip := true + + // Determine if any logging will be done + for _, filt := range log { + if lvl >= filt.Level { + skip = false + break + } + } + if skip { + return + } + + // Determine caller func + pc, _, lineno, ok := runtime.Caller(2) + src := "" + if ok { + src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) + } + + // Make the log record + rec := &LogRecord{ + Level: lvl, + Created: time.Now(), + Source: src, + Message: closure(), + } + + // Dispatch the logs + for _, filt := range log { + if lvl < filt.Level { + continue + } + filt.LogWrite(rec) + } +} + +// Send a log message with manual level, source, and message. 
+func (log Logger) Log(lvl level, source, message string) { + skip := true + + // Determine if any logging will be done + for _, filt := range log { + if lvl >= filt.Level { + skip = false + break + } + } + if skip { + return + } + + // Make the log record + rec := &LogRecord{ + Level: lvl, + Created: time.Now(), + Source: source, + Message: message, + } + + // Dispatch the logs + for _, filt := range log { + if lvl < filt.Level { + continue + } + filt.LogWrite(rec) + } +} + +// Logf logs a formatted log message at the given log level, using the caller as +// its source. +func (log Logger) Logf(lvl level, format string, args ...interface{}) { + log.intLogf(lvl, format, args...) +} + +// Logc logs a string returned by the closure at the given log level, using the caller as +// its source. If no log message would be written, the closure is never called. +func (log Logger) Logc(lvl level, closure func() string) { + log.intLogc(lvl, closure) +} + +// Finest logs a message at the finest log level. +// See Debug for an explanation of the arguments. +func (log Logger) Finest(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINEST + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Fine logs a message at the fine log level. +// See Debug for an explanation of the arguments. +func (log Logger) Fine(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) 
+ case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Debug is a utility method for debug log messages. +// The behavior of Debug depends on the first argument: +// - arg0 is a string +// When given a string as the first argument, this behaves like Logf but with +// the DEBUG log level: the first argument is interpreted as a format for the +// latter arguments. +// - arg0 is a func()string +// When given a closure of type func()string, this logs the string returned by +// the closure iff it will be logged. The closure runs at most one time. +// - arg0 is interface{} +// When given anything else, the log message will be each of the arguments +// formatted with %v and separated by spaces (ala Sprint). +func (log Logger) Debug(arg0 interface{}, args ...interface{}) { + const ( + lvl = DEBUG + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Trace logs a message at the trace log level. +// See Debug for an explanation of the arguments. +func (log Logger) Trace(arg0 interface{}, args ...interface{}) { + const ( + lvl = TRACE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) 
+ } +} + +// Info logs a message at the info log level. +// See Debug for an explanation of the arguments. +func (log Logger) Info(arg0 interface{}, args ...interface{}) { + const ( + lvl = INFO + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Warn logs a message at the warning log level and returns the formatted error. +// At the warning level and higher, there is no performance benefit if the +// message is not actually logged, because all formats are processed and all +// closures are executed to format the error message. +// See Debug for further explanation of the arguments. +func (log Logger) Warn(arg0 interface{}, args ...interface{}) error { + const ( + lvl = WARNING + ) + var msg string + switch first := arg0.(type) { + case string: + // Use the string as a format string + msg = fmt.Sprintf(first, args...) + case func() string: + // Log the closure (no other arguments used) + msg = first() + default: + // Build a format string so that it will be similar to Sprint + msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + } + log.intLogf(lvl, msg) + return errors.New(msg) +} + +// Error logs a message at the error log level and returns the formatted error, +// See Warn for an explanation of the performance and Debug for an explanation +// of the parameters. +func (log Logger) Error(arg0 interface{}, args ...interface{}) error { + const ( + lvl = ERROR + ) + var msg string + switch first := arg0.(type) { + case string: + // Use the string as a format string + msg = fmt.Sprintf(first, args...) 
+ case func() string: + // Log the closure (no other arguments used) + msg = first() + default: + // Build a format string so that it will be similar to Sprint + msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + } + log.intLogf(lvl, msg) + return errors.New(msg) +} + +// Critical logs a message at the critical log level and returns the formatted error, +// See Warn for an explanation of the performance and Debug for an explanation +// of the parameters. +func (log Logger) Critical(arg0 interface{}, args ...interface{}) error { + const ( + lvl = CRITICAL + ) + var msg string + switch first := arg0.(type) { + case string: + // Use the string as a format string + msg = fmt.Sprintf(first, args...) + case func() string: + // Log the closure (no other arguments used) + msg = first() + default: + // Build a format string so that it will be similar to Sprint + msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + } + log.intLogf(lvl, msg) + return errors.New(msg) +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/log4go_test.go b/Godeps/_workspace/src/code.google.com/p/log4go/log4go_test.go new file mode 100644 index 000000000..90c629977 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/log4go_test.go @@ -0,0 +1,534 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
+ +package log4go + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "testing" + "time" +) + +const testLogFile = "_logtest.log" + +var now time.Time = time.Unix(0, 1234567890123456789).In(time.UTC) + +func newLogRecord(lvl level, src string, msg string) *LogRecord { + return &LogRecord{ + Level: lvl, + Source: src, + Created: now, + Message: msg, + } +} + +func TestELog(t *testing.T) { + fmt.Printf("Testing %s\n", L4G_VERSION) + lr := newLogRecord(CRITICAL, "source", "message") + if lr.Level != CRITICAL { + t.Errorf("Incorrect level: %d should be %d", lr.Level, CRITICAL) + } + if lr.Source != "source" { + t.Errorf("Incorrect source: %s should be %s", lr.Source, "source") + } + if lr.Message != "message" { + t.Errorf("Incorrect message: %s should be %s", lr.Source, "message") + } +} + +var formatTests = []struct { + Test string + Record *LogRecord + Formats map[string]string +}{ + { + Test: "Standard formats", + Record: &LogRecord{ + Level: ERROR, + Source: "source", + Message: "message", + Created: now, + }, + Formats: map[string]string{ + // TODO(kevlar): How can I do this so it'll work outside of PST? 
+ FORMAT_DEFAULT: "[2009/02/13 23:31:30 UTC] [EROR] (source) message\n", + FORMAT_SHORT: "[23:31 02/13/09] [EROR] message\n", + FORMAT_ABBREV: "[EROR] message\n", + }, + }, +} + +func TestFormatLogRecord(t *testing.T) { + for _, test := range formatTests { + name := test.Test + for fmt, want := range test.Formats { + if got := FormatLogRecord(fmt, test.Record); got != want { + t.Errorf("%s - %s:", name, fmt) + t.Errorf(" got %q", got) + t.Errorf(" want %q", want) + } + } + } +} + +var logRecordWriteTests = []struct { + Test string + Record *LogRecord + Console string +}{ + { + Test: "Normal message", + Record: &LogRecord{ + Level: CRITICAL, + Source: "source", + Message: "message", + Created: now, + }, + Console: "[02/13/09 23:31:30] [CRIT] message\n", + }, +} + +func TestConsoleLogWriter(t *testing.T) { + console := make(ConsoleLogWriter) + + r, w := io.Pipe() + go console.run(w) + defer console.Close() + + buf := make([]byte, 1024) + + for _, test := range logRecordWriteTests { + name := test.Test + + console.LogWrite(test.Record) + n, _ := r.Read(buf) + + if got, want := string(buf[:n]), test.Console; got != want { + t.Errorf("%s: got %q", name, got) + t.Errorf("%s: want %q", name, want) + } + } +} + +func TestFileLogWriter(t *testing.T) { + defer func(buflen int) { + LogBufferLength = buflen + }(LogBufferLength) + LogBufferLength = 0 + + w := NewFileLogWriter(testLogFile, false) + if w == nil { + t.Fatalf("Invalid return: w should not be nil") + } + defer os.Remove(testLogFile) + + w.LogWrite(newLogRecord(CRITICAL, "source", "message")) + w.Close() + runtime.Gosched() + + if contents, err := ioutil.ReadFile(testLogFile); err != nil { + t.Errorf("read(%q): %s", testLogFile, err) + } else if len(contents) != 50 { + t.Errorf("malformed filelog: %q (%d bytes)", string(contents), len(contents)) + } +} + +func TestXMLLogWriter(t *testing.T) { + defer func(buflen int) { + LogBufferLength = buflen + }(LogBufferLength) + LogBufferLength = 0 + + w := 
NewXMLLogWriter(testLogFile, false) + if w == nil { + t.Fatalf("Invalid return: w should not be nil") + } + defer os.Remove(testLogFile) + + w.LogWrite(newLogRecord(CRITICAL, "source", "message")) + w.Close() + runtime.Gosched() + + if contents, err := ioutil.ReadFile(testLogFile); err != nil { + t.Errorf("read(%q): %s", testLogFile, err) + } else if len(contents) != 185 { + t.Errorf("malformed xmllog: %q (%d bytes)", string(contents), len(contents)) + } +} + +func TestLogger(t *testing.T) { + sl := NewDefaultLogger(WARNING) + if sl == nil { + t.Fatalf("NewDefaultLogger should never return nil") + } + if lw, exist := sl["stdout"]; lw == nil || exist != true { + t.Fatalf("NewDefaultLogger produced invalid logger (DNE or nil)") + } + if sl["stdout"].Level != WARNING { + t.Fatalf("NewDefaultLogger produced invalid logger (incorrect level)") + } + if len(sl) != 1 { + t.Fatalf("NewDefaultLogger produced invalid logger (incorrect map count)") + } + + //func (l *Logger) AddFilter(name string, level int, writer LogWriter) {} + l := make(Logger) + l.AddFilter("stdout", DEBUG, NewConsoleLogWriter()) + if lw, exist := l["stdout"]; lw == nil || exist != true { + t.Fatalf("AddFilter produced invalid logger (DNE or nil)") + } + if l["stdout"].Level != DEBUG { + t.Fatalf("AddFilter produced invalid logger (incorrect level)") + } + if len(l) != 1 { + t.Fatalf("AddFilter produced invalid logger (incorrect map count)") + } + + //func (l *Logger) Warn(format string, args ...interface{}) error {} + if err := l.Warn("%s %d %#v", "Warning:", 1, []int{}); err.Error() != "Warning: 1 []int{}" { + t.Errorf("Warn returned invalid error: %s", err) + } + + //func (l *Logger) Error(format string, args ...interface{}) error {} + if err := l.Error("%s %d %#v", "Error:", 10, []string{}); err.Error() != "Error: 10 []string{}" { + t.Errorf("Error returned invalid error: %s", err) + } + + //func (l *Logger) Critical(format string, args ...interface{}) error {} + if err := l.Critical("%s %d %#v", 
"Critical:", 100, []int64{}); err.Error() != "Critical: 100 []int64{}" { + t.Errorf("Critical returned invalid error: %s", err) + } + + // Already tested or basically untestable + //func (l *Logger) Log(level int, source, message string) {} + //func (l *Logger) Logf(level int, format string, args ...interface{}) {} + //func (l *Logger) intLogf(level int, format string, args ...interface{}) string {} + //func (l *Logger) Finest(format string, args ...interface{}) {} + //func (l *Logger) Fine(format string, args ...interface{}) {} + //func (l *Logger) Debug(format string, args ...interface{}) {} + //func (l *Logger) Trace(format string, args ...interface{}) {} + //func (l *Logger) Info(format string, args ...interface{}) {} +} + +func TestLogOutput(t *testing.T) { + const ( + expected = "fdf3e51e444da56b4cb400f30bc47424" + ) + + // Unbuffered output + defer func(buflen int) { + LogBufferLength = buflen + }(LogBufferLength) + LogBufferLength = 0 + + l := make(Logger) + + // Delete and open the output log without a timestamp (for a constant md5sum) + l.AddFilter("file", FINEST, NewFileLogWriter(testLogFile, false).SetFormat("[%L] %M")) + defer os.Remove(testLogFile) + + // Send some log messages + l.Log(CRITICAL, "testsrc1", fmt.Sprintf("This message is level %d", int(CRITICAL))) + l.Logf(ERROR, "This message is level %v", ERROR) + l.Logf(WARNING, "This message is level %s", WARNING) + l.Logc(INFO, func() string { return "This message is level INFO" }) + l.Trace("This message is level %d", int(TRACE)) + l.Debug("This message is level %s", DEBUG) + l.Fine(func() string { return fmt.Sprintf("This message is level %v", FINE) }) + l.Finest("This message is level %v", FINEST) + l.Finest(FINEST, "is also this message's level") + + l.Close() + + contents, err := ioutil.ReadFile(testLogFile) + if err != nil { + t.Fatalf("Could not read output log: %s", err) + } + + sum := md5.New() + sum.Write(contents) + if sumstr := hex.EncodeToString(sum.Sum(nil)); sumstr != expected { + 
t.Errorf("--- Log Contents:\n%s---", string(contents)) + t.Fatalf("Checksum does not match: %s (expecting %s)", sumstr, expected) + } +} + +func TestCountMallocs(t *testing.T) { + const N = 1 + var m runtime.MemStats + getMallocs := func() uint64 { + runtime.ReadMemStats(&m) + return m.Mallocs + } + + // Console logger + sl := NewDefaultLogger(INFO) + mallocs := 0 - getMallocs() + for i := 0; i < N; i++ { + sl.Log(WARNING, "here", "This is a WARNING message") + } + mallocs += getMallocs() + fmt.Printf("mallocs per sl.Log((WARNING, \"here\", \"This is a log message\"): %d\n", mallocs/N) + + // Console logger formatted + mallocs = 0 - getMallocs() + for i := 0; i < N; i++ { + sl.Logf(WARNING, "%s is a log message with level %d", "This", WARNING) + } + mallocs += getMallocs() + fmt.Printf("mallocs per sl.Logf(WARNING, \"%%s is a log message with level %%d\", \"This\", WARNING): %d\n", mallocs/N) + + // Console logger (not logged) + sl = NewDefaultLogger(INFO) + mallocs = 0 - getMallocs() + for i := 0; i < N; i++ { + sl.Log(DEBUG, "here", "This is a DEBUG log message") + } + mallocs += getMallocs() + fmt.Printf("mallocs per unlogged sl.Log((WARNING, \"here\", \"This is a log message\"): %d\n", mallocs/N) + + // Console logger formatted (not logged) + mallocs = 0 - getMallocs() + for i := 0; i < N; i++ { + sl.Logf(DEBUG, "%s is a log message with level %d", "This", DEBUG) + } + mallocs += getMallocs() + fmt.Printf("mallocs per unlogged sl.Logf(WARNING, \"%%s is a log message with level %%d\", \"This\", WARNING): %d\n", mallocs/N) +} + +func TestXMLConfig(t *testing.T) { + const ( + configfile = "example.xml" + ) + + fd, err := os.Create(configfile) + if err != nil { + t.Fatalf("Could not open %s for writing: %s", configfile, err) + } + + fmt.Fprintln(fd, "") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " stdout") + fmt.Fprintln(fd, " console") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " DEBUG") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " file") + 
fmt.Fprintln(fd, " file") + fmt.Fprintln(fd, " FINEST") + fmt.Fprintln(fd, " test.log") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " [%D %T] [%L] (%S) %M") + fmt.Fprintln(fd, " false ") + fmt.Fprintln(fd, " 0M ") + fmt.Fprintln(fd, " 0K ") + fmt.Fprintln(fd, " true ") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " xmllog") + fmt.Fprintln(fd, " xml") + fmt.Fprintln(fd, " TRACE") + fmt.Fprintln(fd, " trace.xml") + fmt.Fprintln(fd, " true ") + fmt.Fprintln(fd, " 100M ") + fmt.Fprintln(fd, " 6K ") + fmt.Fprintln(fd, " false ") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, " donotopen") + fmt.Fprintln(fd, " socket") + fmt.Fprintln(fd, " FINEST") + fmt.Fprintln(fd, " 192.168.1.255:12124 ") + fmt.Fprintln(fd, " udp ") + fmt.Fprintln(fd, " ") + fmt.Fprintln(fd, "") + fd.Close() + + log := make(Logger) + log.LoadConfiguration(configfile) + defer os.Remove("trace.xml") + defer os.Remove("test.log") + defer log.Close() + + // Make sure we got all loggers + if len(log) != 3 { + t.Fatalf("XMLConfig: Expected 3 filters, found %d", len(log)) + } + + // Make sure they're the right keys + if _, ok := log["stdout"]; !ok { + t.Errorf("XMLConfig: Expected stdout logger") + } + if _, ok := log["file"]; !ok { + t.Fatalf("XMLConfig: Expected file logger") + } + if _, ok := log["xmllog"]; !ok { + t.Fatalf("XMLConfig: Expected xmllog logger") + } + + // Make sure they're the right type + if _, ok := log["stdout"].LogWriter.(ConsoleLogWriter); !ok { + t.Fatalf("XMLConfig: Expected stdout to be ConsoleLogWriter, found %T", log["stdout"].LogWriter) + } + if _, ok := log["file"].LogWriter.(*FileLogWriter); !ok { + t.Fatalf("XMLConfig: Expected file to be *FileLogWriter, found %T", log["file"].LogWriter) + } + if _, ok := log["xmllog"].LogWriter.(*FileLogWriter); !ok { + t.Fatalf("XMLConfig: Expected xmllog to be *FileLogWriter, found %T", log["xmllog"].LogWriter) + } + + // Make sure levels are set + if lvl := log["stdout"].Level; lvl != DEBUG { + 
t.Errorf("XMLConfig: Expected stdout to be set to level %d, found %d", DEBUG, lvl) + } + if lvl := log["file"].Level; lvl != FINEST { + t.Errorf("XMLConfig: Expected file to be set to level %d, found %d", FINEST, lvl) + } + if lvl := log["xmllog"].Level; lvl != TRACE { + t.Errorf("XMLConfig: Expected xmllog to be set to level %d, found %d", TRACE, lvl) + } + + // Make sure the w is open and points to the right file + if fname := log["file"].LogWriter.(*FileLogWriter).file.Name(); fname != "test.log" { + t.Errorf("XMLConfig: Expected file to have opened %s, found %s", "test.log", fname) + } + + // Make sure the XLW is open and points to the right file + if fname := log["xmllog"].LogWriter.(*FileLogWriter).file.Name(); fname != "trace.xml" { + t.Errorf("XMLConfig: Expected xmllog to have opened %s, found %s", "trace.xml", fname) + } + + // Move XML log file + os.Rename(configfile, "examples/"+configfile) // Keep this so that an example with the documentation is available +} + +func BenchmarkFormatLogRecord(b *testing.B) { + const updateEvery = 1 + rec := &LogRecord{ + Level: CRITICAL, + Created: now, + Source: "source", + Message: "message", + } + for i := 0; i < b.N; i++ { + rec.Created = rec.Created.Add(1 * time.Second / updateEvery) + if i%2 == 0 { + FormatLogRecord(FORMAT_DEFAULT, rec) + } else { + FormatLogRecord(FORMAT_SHORT, rec) + } + } +} + +func BenchmarkConsoleLog(b *testing.B) { + /* This doesn't seem to work on OS X + sink, err := os.Open(os.DevNull) + if err != nil { + panic(err) + } + if err := syscall.Dup2(int(sink.Fd()), syscall.Stdout); err != nil { + panic(err) + } + */ + + stdout = ioutil.Discard + sl := NewDefaultLogger(INFO) + for i := 0; i < b.N; i++ { + sl.Log(WARNING, "here", "This is a log message") + } +} + +func BenchmarkConsoleNotLogged(b *testing.B) { + sl := NewDefaultLogger(INFO) + for i := 0; i < b.N; i++ { + sl.Log(DEBUG, "here", "This is a log message") + } +} + +func BenchmarkConsoleUtilLog(b *testing.B) { + sl := 
NewDefaultLogger(INFO) + for i := 0; i < b.N; i++ { + sl.Info("%s is a log message", "This") + } +} + +func BenchmarkConsoleUtilNotLog(b *testing.B) { + sl := NewDefaultLogger(INFO) + for i := 0; i < b.N; i++ { + sl.Debug("%s is a log message", "This") + } +} + +func BenchmarkFileLog(b *testing.B) { + sl := make(Logger) + b.StopTimer() + sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) + b.StartTimer() + for i := 0; i < b.N; i++ { + sl.Log(WARNING, "here", "This is a log message") + } + b.StopTimer() + os.Remove("benchlog.log") +} + +func BenchmarkFileNotLogged(b *testing.B) { + sl := make(Logger) + b.StopTimer() + sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) + b.StartTimer() + for i := 0; i < b.N; i++ { + sl.Log(DEBUG, "here", "This is a log message") + } + b.StopTimer() + os.Remove("benchlog.log") +} + +func BenchmarkFileUtilLog(b *testing.B) { + sl := make(Logger) + b.StopTimer() + sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) + b.StartTimer() + for i := 0; i < b.N; i++ { + sl.Info("%s is a log message", "This") + } + b.StopTimer() + os.Remove("benchlog.log") +} + +func BenchmarkFileUtilNotLog(b *testing.B) { + sl := make(Logger) + b.StopTimer() + sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) + b.StartTimer() + for i := 0; i < b.N; i++ { + sl.Debug("%s is a log message", "This") + } + b.StopTimer() + os.Remove("benchlog.log") +} + +// Benchmark results (darwin amd64 6g) +//elog.BenchmarkConsoleLog 100000 22819 ns/op +//elog.BenchmarkConsoleNotLogged 2000000 879 ns/op +//elog.BenchmarkConsoleUtilLog 50000 34380 ns/op +//elog.BenchmarkConsoleUtilNotLog 1000000 1339 ns/op +//elog.BenchmarkFileLog 100000 26497 ns/op +//elog.BenchmarkFileNotLogged 2000000 821 ns/op +//elog.BenchmarkFileUtilLog 50000 33945 ns/op +//elog.BenchmarkFileUtilNotLog 1000000 1258 ns/op diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/pattlog.go 
b/Godeps/_workspace/src/code.google.com/p/log4go/pattlog.go new file mode 100644 index 000000000..8224302b3 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/pattlog.go @@ -0,0 +1,122 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +package log4go + +import ( + "fmt" + "bytes" + "io" +) + +const ( + FORMAT_DEFAULT = "[%D %T] [%L] (%S) %M" + FORMAT_SHORT = "[%t %d] [%L] %M" + FORMAT_ABBREV = "[%L] %M" +) + +type formatCacheType struct { + LastUpdateSeconds int64 + shortTime, shortDate string + longTime, longDate string +} + +var formatCache = &formatCacheType{} + +// Known format codes: +// %T - Time (15:04:05 MST) +// %t - Time (15:04) +// %D - Date (2006/01/02) +// %d - Date (01/02/06) +// %L - Level (FNST, FINE, DEBG, TRAC, WARN, EROR, CRIT) +// %S - Source +// %M - Message +// Ignores unknown formats +// Recommended: "[%D %T] [%L] (%S) %M" +func FormatLogRecord(format string, rec *LogRecord) string { + if rec == nil { + return "" + } + if len(format) == 0 { + return "" + } + + out := bytes.NewBuffer(make([]byte, 0, 64)) + secs := rec.Created.UnixNano() / 1e9 + + cache := *formatCache + if cache.LastUpdateSeconds != secs { + month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year() + hour, minute, second := rec.Created.Hour(), rec.Created.Minute(), rec.Created.Second() + zone, _ := rec.Created.Zone() + updated := &formatCacheType{ + LastUpdateSeconds: secs, + shortTime: fmt.Sprintf("%02d:%02d", hour, minute), + shortDate: fmt.Sprintf("%02d/%02d/%02d", month, day, year%100), + longTime: fmt.Sprintf("%02d:%02d:%02d %s", hour, minute, second, zone), + longDate: fmt.Sprintf("%04d/%02d/%02d", year, month, day), + } + cache = *updated + formatCache = updated + } + + // Split the string into pieces by % signs + pieces := bytes.Split([]byte(format), []byte{'%'}) + + // Iterate over the pieces, replacing known formats + for i, piece := range pieces { + if i > 0 && len(piece) > 0 { + switch piece[0] { + case 'T': + 
out.WriteString(cache.longTime) + case 't': + out.WriteString(cache.shortTime) + case 'D': + out.WriteString(cache.longDate) + case 'd': + out.WriteString(cache.shortDate) + case 'L': + out.WriteString(levelStrings[rec.Level]) + case 'S': + out.WriteString(rec.Source) + case 'M': + out.WriteString(rec.Message) + } + if len(piece) > 1 { + out.Write(piece[1:]) + } + } else if len(piece) > 0 { + out.Write(piece) + } + } + out.WriteByte('\n') + + return out.String() +} + +// This is the standard writer that prints to standard output. +type FormatLogWriter chan *LogRecord + +// This creates a new FormatLogWriter +func NewFormatLogWriter(out io.Writer, format string) FormatLogWriter { + records := make(FormatLogWriter, LogBufferLength) + go records.run(out, format) + return records +} + +func (w FormatLogWriter) run(out io.Writer, format string) { + for rec := range w { + fmt.Fprint(out, FormatLogRecord(format, rec)) + } +} + +// This is the FormatLogWriter's output method. This will block if the output +// buffer is full. +func (w FormatLogWriter) LogWrite(rec *LogRecord) { + w <- rec +} + +// Close stops the logger from sending messages to standard output. Attempts to +// send log messages to this logger after a Close have undefined behavior. +func (w FormatLogWriter) Close() { + close(w) +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/socklog.go b/Godeps/_workspace/src/code.google.com/p/log4go/socklog.go new file mode 100644 index 000000000..1d224a99d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/socklog.go @@ -0,0 +1,57 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
+ +package log4go + +import ( + "encoding/json" + "fmt" + "net" + "os" +) + +// This log writer sends output to a socket +type SocketLogWriter chan *LogRecord + +// This is the SocketLogWriter's output method +func (w SocketLogWriter) LogWrite(rec *LogRecord) { + w <- rec +} + +func (w SocketLogWriter) Close() { + close(w) +} + +func NewSocketLogWriter(proto, hostport string) SocketLogWriter { + sock, err := net.Dial(proto, hostport) + if err != nil { + fmt.Fprintf(os.Stderr, "NewSocketLogWriter(%q): %s\n", hostport, err) + return nil + } + + w := SocketLogWriter(make(chan *LogRecord, LogBufferLength)) + + go func() { + defer func() { + if sock != nil && proto == "tcp" { + sock.Close() + } + }() + + for rec := range w { + // Marshall into JSON + js, err := json.Marshal(rec) + if err != nil { + fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err) + return + } + + _, err = sock.Write(js) + if err != nil { + fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err) + return + } + } + }() + + return w +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/termlog.go b/Godeps/_workspace/src/code.google.com/p/log4go/termlog.go new file mode 100644 index 000000000..1ed2e4e0d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/termlog.go @@ -0,0 +1,45 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +package log4go + +import ( + "io" + "os" + "fmt" +) + +var stdout io.Writer = os.Stdout + +// This is the standard writer that prints to standard output. 
+type ConsoleLogWriter chan *LogRecord + +// This creates a new ConsoleLogWriter +func NewConsoleLogWriter() ConsoleLogWriter { + records := make(ConsoleLogWriter, LogBufferLength) + go records.run(stdout) + return records +} + +func (w ConsoleLogWriter) run(out io.Writer) { + var timestr string + var timestrAt int64 + + for rec := range w { + if at := rec.Created.UnixNano() / 1e9; at != timestrAt { + timestr, timestrAt = rec.Created.Format("01/02/06 15:04:05"), at + } + fmt.Fprint(out, "[", timestr, "] [", levelStrings[rec.Level], "] ", rec.Message, "\n") + } +} + +// This is the ConsoleLogWriter's output method. This will block if the output +// buffer is full. +func (w ConsoleLogWriter) LogWrite(rec *LogRecord) { + w <- rec +} + +// Close stops the logger from sending messages to standard output. Attempts to +// send log messages to this logger after a Close have undefined behavior. +func (w ConsoleLogWriter) Close() { + close(w) +} diff --git a/Godeps/_workspace/src/code.google.com/p/log4go/wrapper.go b/Godeps/_workspace/src/code.google.com/p/log4go/wrapper.go new file mode 100644 index 000000000..10ecd88e6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/log4go/wrapper.go @@ -0,0 +1,278 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +package log4go + +import ( + "errors" + "os" + "fmt" + "strings" +) + +var ( + Global Logger +) + +func init() { + Global = NewDefaultLogger(DEBUG) +} + +// Wrapper for (*Logger).LoadConfiguration +func LoadConfiguration(filename string) { + Global.LoadConfiguration(filename) +} + +// Wrapper for (*Logger).AddFilter +func AddFilter(name string, lvl level, writer LogWriter) { + Global.AddFilter(name, lvl, writer) +} + +// Wrapper for (*Logger).Close (closes and removes all logwriters) +func Close() { + Global.Close() +} + +func Crash(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(CRITICAL, strings.Repeat(" %v", len(args))[1:], args...) 
+ } + panic(args) +} + +// Logs the given message and crashes the program +func Crashf(format string, args ...interface{}) { + Global.intLogf(CRITICAL, format, args...) + Global.Close() // so that hopefully the messages get logged + panic(fmt.Sprintf(format, args...)) +} + +// Compatibility with `log` +func Exit(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) + } + Global.Close() // so that hopefully the messages get logged + os.Exit(0) +} + +// Compatibility with `log` +func Exitf(format string, args ...interface{}) { + Global.intLogf(ERROR, format, args...) + Global.Close() // so that hopefully the messages get logged + os.Exit(0) +} + +// Compatibility with `log` +func Stderr(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) + } +} + +// Compatibility with `log` +func Stderrf(format string, args ...interface{}) { + Global.intLogf(ERROR, format, args...) +} + +// Compatibility with `log` +func Stdout(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(INFO, strings.Repeat(" %v", len(args))[1:], args...) + } +} + +// Compatibility with `log` +func Stdoutf(format string, args ...interface{}) { + Global.intLogf(INFO, format, args...) +} + +// Send a log message manually +// Wrapper for (*Logger).Log +func Log(lvl level, source, message string) { + Global.Log(lvl, source, message) +} + +// Send a formatted log message easily +// Wrapper for (*Logger).Logf +func Logf(lvl level, format string, args ...interface{}) { + Global.intLogf(lvl, format, args...) 
+} + +// Send a closure log message +// Wrapper for (*Logger).Logc +func Logc(lvl level, closure func() string) { + Global.intLogc(lvl, closure) +} + +// Utility for finest log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Finest +func Finest(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINEST + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for fine log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Fine +func Fine(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for debug log messages +// When given a string as the first argument, this behaves like Logf but with the DEBUG log level (e.g. the first argument is interpreted as a format for the latter arguments) +// When given a closure of type func()string, this logs the string returned by the closure iff it will be logged. The closure runs at most one time. +// When given anything else, the log message will be each of the arguments formatted with %v and separated by spaces (ala Sprint). 
+// Wrapper for (*Logger).Debug +func Debug(arg0 interface{}, args ...interface{}) { + const ( + lvl = DEBUG + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for trace log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Trace +func Trace(arg0 interface{}, args ...interface{}) { + const ( + lvl = TRACE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for info log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Info +func Info(arg0 interface{}, args ...interface{}) { + const ( + lvl = INFO + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) 
+ } +} + +// Utility for warn log messages (returns an error for easy function returns) (see Debug() for parameter explanation) +// These functions will execute a closure exactly once, to build the error message for the return +// Wrapper for (*Logger).Warn +func Warn(arg0 interface{}, args ...interface{}) error { + const ( + lvl = WARNING + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + return errors.New(fmt.Sprintf(first, args...)) + case func() string: + // Log the closure (no other arguments used) + str := first() + Global.intLogf(lvl, "%s", str) + return errors.New(str) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) + } + return nil +} + +// Utility for error log messages (returns an error for easy function returns) (see Debug() for parameter explanation) +// These functions will execute a closure exactly once, to build the error message for the return +// Wrapper for (*Logger).Error +func Error(arg0 interface{}, args ...interface{}) error { + const ( + lvl = ERROR + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + return errors.New(fmt.Sprintf(first, args...)) + case func() string: + // Log the closure (no other arguments used) + str := first() + Global.intLogf(lvl, "%s", str) + return errors.New(str) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) 
+ return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) + } + return nil +} + +// Utility for critical log messages (returns an error for easy function returns) (see Debug() for parameter explanation) +// These functions will execute a closure exactly once, to build the error message for the return +// Wrapper for (*Logger).Critical +func Critical(arg0 interface{}, args ...interface{}) error { + const ( + lvl = CRITICAL + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + return errors.New(fmt.Sprintf(first, args...)) + case func() string: + // Log the closure (no other arguments used) + str := first() + Global.intLogf(lvl, "%s", str) + return errors.New(str) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/.gitignore b/Godeps/_workspace/src/github.com/anachronistic/apns/.gitignore new file mode 100644 index 000000000..d73587219 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*.pem \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/LICENSE b/Godeps/_workspace/src/github.com/anachronistic/apns/LICENSE new file mode 100644 index 000000000..b80ffbd8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Alan Harris + 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/client.go b/Godeps/_workspace/src/github.com/anachronistic/apns/client.go new file mode 100644 index 000000000..3fc079a87 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/client.go @@ -0,0 +1,167 @@ +package apns + +import ( + "crypto/tls" + "errors" + "net" + "strings" + "time" +) + +var _ APNSClient = &Client{} + +// APNSClient is an APNS client. +type APNSClient interface { + ConnectAndWrite(resp *PushNotificationResponse, payload []byte) (err error) + Send(pn *PushNotification) (resp *PushNotificationResponse) +} + +// Client contains the fields necessary to communicate +// with Apple, such as the gateway to use and your +// certificate contents. +// +// You'll need to provide your own CertificateFile +// and KeyFile to send notifications. 
Ideally, you'll +// just set the CertificateFile and KeyFile fields to +// a location on drive where the certs can be loaded, +// but if you prefer you can use the CertificateBase64 +// and KeyBase64 fields to store the actual contents. +type Client struct { + Gateway string + CertificateFile string + CertificateBase64 string + KeyFile string + KeyBase64 string +} + +// BareClient can be used to set the contents of your +// certificate and key blocks manually. +func BareClient(gateway, certificateBase64, keyBase64 string) (c *Client) { + c = new(Client) + c.Gateway = gateway + c.CertificateBase64 = certificateBase64 + c.KeyBase64 = keyBase64 + return +} + +// NewClient assumes you'll be passing in paths that +// point to your certificate and key. +func NewClient(gateway, certificateFile, keyFile string) (c *Client) { + c = new(Client) + c.Gateway = gateway + c.CertificateFile = certificateFile + c.KeyFile = keyFile + return +} + +// Send connects to the APN service and sends your push notification. +// Remember that if the submission is successful, Apple won't reply. +func (client *Client) Send(pn *PushNotification) (resp *PushNotificationResponse) { + resp = new(PushNotificationResponse) + + payload, err := pn.ToBytes() + if err != nil { + resp.Success = false + resp.Error = err + return + } + + err = client.ConnectAndWrite(resp, payload) + if err != nil { + resp.Success = false + resp.Error = err + return + } + + resp.Success = true + resp.Error = nil + + return +} + +// ConnectAndWrite establishes the connection to Apple and handles the +// transmission of your push notification, as well as waiting for a reply. +// +// In lieu of a timeout (which would be available in Go 1.1) +// we use a timeout channel pattern instead. We start two goroutines, +// one of which just sleeps for TimeoutSeconds seconds, while the other +// waits for a response from the Apple servers. +// +// Whichever channel puts data on first is the "winner". 
As such, it's +// possible to get a false positive if Apple takes a long time to respond. +// It's probably not a deal-breaker, but something to be aware of. +func (client *Client) ConnectAndWrite(resp *PushNotificationResponse, payload []byte) (err error) { + var cert tls.Certificate + + if len(client.CertificateBase64) == 0 && len(client.KeyBase64) == 0 { + // The user did not specify raw block contents, so check the filesystem. + cert, err = tls.LoadX509KeyPair(client.CertificateFile, client.KeyFile) + } else { + // The user provided the raw block contents, so use that. + cert, err = tls.X509KeyPair([]byte(client.CertificateBase64), []byte(client.KeyBase64)) + } + + if err != nil { + return err + } + + gatewayParts := strings.Split(client.Gateway, ":") + conf := &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: gatewayParts[0], + } + + conn, err := net.Dial("tcp", client.Gateway) + if err != nil { + return err + } + defer conn.Close() + + tlsConn := tls.Client(conn, conf) + err = tlsConn.Handshake() + if err != nil { + return err + } + defer tlsConn.Close() + + _, err = tlsConn.Write(payload) + if err != nil { + return err + } + + // Create one channel that will serve to handle + // timeouts when the notification succeeds. + timeoutChannel := make(chan bool, 1) + go func() { + time.Sleep(time.Second * TimeoutSeconds) + timeoutChannel <- true + }() + + // This channel will contain the binary response + // from Apple in the event of a failure. + responseChannel := make(chan []byte, 1) + go func() { + buffer := make([]byte, 6, 6) + tlsConn.Read(buffer) + responseChannel <- buffer + }() + + // First one back wins! + // The data structure for an APN response is as follows: + // + // command -> 1 byte + // status -> 1 byte + // identifier -> 4 bytes + // + // The first byte will always be set to 8. 
+ select { + case r := <-responseChannel: + resp.Success = false + resp.AppleResponse = ApplePushResponses[r[1]] + err = errors.New(resp.AppleResponse) + case <-timeoutChannel: + resp.Success = true + } + + return err +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/client_mock.go b/Godeps/_workspace/src/github.com/anachronistic/apns/client_mock.go new file mode 100644 index 000000000..29a1f4b23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/client_mock.go @@ -0,0 +1,21 @@ +package apns + +import "github.com/stretchr/testify/mock" + +type MockClient struct { + mock.Mock +} + +func (m *MockClient) ConnectAndWrite(resp *PushNotificationResponse, payload []byte) (err error) { + return m.Called(resp, payload).Error(0) +} + +func (m *MockClient) Send(pn *PushNotification) (resp *PushNotificationResponse) { + r := m.Called(pn).Get(0) + if r != nil { + if r, ok := r.(*PushNotificationResponse); ok { + return r + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/client_mock_test.go b/Godeps/_workspace/src/github.com/anachronistic/apns/client_mock_test.go new file mode 100644 index 000000000..86e997b5a --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/client_mock_test.go @@ -0,0 +1,24 @@ +package apns + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMockClientConnectAndWrite(t *testing.T) { + m := &MockClient{} + m.On("ConnectAndWrite", (*PushNotificationResponse)(nil), []byte(nil)).Return(nil) + assert.Nil(t, m.ConnectAndWrite(nil, nil)) + m.On("ConnectAndWrite", &PushNotificationResponse{}, []byte{}).Return(errors.New("test")) + assert.Equal(t, errors.New("test"), m.ConnectAndWrite(&PushNotificationResponse{}, []byte{})) +} + +func TestMockClientSend(t *testing.T) { + m := &MockClient{} + m.On("Send", (*PushNotification)(nil)).Return(nil) + assert.Nil(t, m.Send(nil)) + m.On("Send", 
&PushNotification{}).Return(&PushNotificationResponse{}) + assert.Equal(t, &PushNotificationResponse{}, m.Send(&PushNotification{})) +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/feedback.go b/Godeps/_workspace/src/github.com/anachronistic/apns/feedback.go new file mode 100644 index 000000000..32e7f0f15 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/feedback.go @@ -0,0 +1,102 @@ +package apns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "encoding/hex" + "errors" + "net" + "strings" + "time" +) + +// Wait at most this many seconds for feedback data from Apple. +const FeedbackTimeoutSeconds = 5 + +// FeedbackChannel will receive individual responses from Apple. +var FeedbackChannel = make(chan (*FeedbackResponse)) + +// If there's nothing to read, ShutdownChannel gets a true. +var ShutdownChannel = make(chan bool) + +// FeedbackResponse represents a device token that Apple has +// indicated should not be sent to in the future. +type FeedbackResponse struct { + Timestamp uint32 + DeviceToken string +} + +// NewFeedbackResponse creates and returns a FeedbackResponse structure. +func NewFeedbackResponse() (resp *FeedbackResponse) { + resp = new(FeedbackResponse) + return +} + +// ListenForFeedback connects to the Apple Feedback Service +// and checks for device tokens. +// +// Feedback consists of device tokens that should +// not be sent to in the future; Apple *does* monitor that +// you respect this so you should be checking it ;) +func (client *Client) ListenForFeedback() (err error) { + var cert tls.Certificate + + if len(client.CertificateBase64) == 0 && len(client.KeyBase64) == 0 { + // The user did not specify raw block contents, so check the filesystem. + cert, err = tls.LoadX509KeyPair(client.CertificateFile, client.KeyFile) + } else { + // The user provided the raw block contents, so use that. 
+ cert, err = tls.X509KeyPair([]byte(client.CertificateBase64), []byte(client.KeyBase64)) + } + + if err != nil { + return err + } + + gatewayParts := strings.Split(client.Gateway, ":") + conf := &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: gatewayParts[0], + } + + conn, err := net.Dial("tcp", client.Gateway) + if err != nil { + return err + } + defer conn.Close() + conn.SetReadDeadline(time.Now().Add(FeedbackTimeoutSeconds * time.Second)) + + tlsConn := tls.Client(conn, conf) + err = tlsConn.Handshake() + if err != nil { + return err + } + + var tokenLength uint16 + buffer := make([]byte, 38, 38) + deviceToken := make([]byte, 32, 32) + + for { + _, err := tlsConn.Read(buffer) + if err != nil { + ShutdownChannel <- true + break + } + + resp := NewFeedbackResponse() + + r := bytes.NewReader(buffer) + binary.Read(r, binary.BigEndian, &resp.Timestamp) + binary.Read(r, binary.BigEndian, &tokenLength) + binary.Read(r, binary.BigEndian, &deviceToken) + if tokenLength != 32 { + return errors.New("token length should be equal to 32, but isn't") + } + resp.DeviceToken = hex.EncodeToString(deviceToken) + + FeedbackChannel <- resp + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/legacy.go b/Godeps/_workspace/src/github.com/anachronistic/apns/legacy.go new file mode 100644 index 000000000..44da3dde8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/legacy.go @@ -0,0 +1,15 @@ +package apns + +// This file exists to support backward-compatibility +// as I gradually refactor and overhaul. Ideally, golint +// should only complain about this file (and we should +// try to keep its complaints to a minimum). + +// These variables map old identifiers to their current format. 
+var ( + APPLE_PUSH_RESPONSES = ApplePushResponses + FEEDBACK_TIMEOUT_SECONDS = FeedbackTimeoutSeconds + IDENTIFIER_UBOUND = IdentifierUbound + MAX_PAYLOAD_SIZE_BYTES = MaxPayloadSizeBytes + TIMEOUT_SECONDS = TimeoutSeconds +) diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/legacy_test.go b/Godeps/_workspace/src/github.com/anachronistic/apns/legacy_test.go new file mode 100644 index 000000000..4b983c128 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/legacy_test.go @@ -0,0 +1,27 @@ +package apns + +import ( + "reflect" + "testing" +) + +// These identifiers were changed to resolve golint violations. +// However, it's possible that legacy code may rely on them. This +// will help avoid springing a breaking change on people. +func TestLegacyConstants(t *testing.T) { + if !reflect.DeepEqual(APPLE_PUSH_RESPONSES, ApplePushResponses) { + t.Error("expected APPLE_PUSH_RESPONSES to equal ApplePushResponses") + } + if !reflect.DeepEqual(FEEDBACK_TIMEOUT_SECONDS, FeedbackTimeoutSeconds) { + t.Error("expected FEEDBACK_TIMEOUT_SECONDS to equal FeedbackTimeoutSeconds") + } + if !reflect.DeepEqual(IDENTIFIER_UBOUND, IdentifierUbound) { + t.Error("expected IDENTIFIER_UBOUND to equal IdentifierUbound") + } + if !reflect.DeepEqual(MAX_PAYLOAD_SIZE_BYTES, MaxPayloadSizeBytes) { + t.Error("expected MAX_PAYLOAD_SIZE_BYTES to equal MaxPayloadSizeBytes") + } + if !reflect.DeepEqual(TIMEOUT_SECONDS, TimeoutSeconds) { + t.Error("expected TIMEOUT_SECONDS to equal TimeoutSeconds") + } +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/mock_feedback_server.go b/Godeps/_workspace/src/github.com/anachronistic/apns/mock_feedback_server.go new file mode 100644 index 000000000..d7536f261 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/mock_feedback_server.go @@ -0,0 +1,53 @@ +package apns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "log" + "net" + "time" +) + +// StartMockFeedbackServer spins up a 
simple stand-in for the Apple +// feedback service that can be used for testing purposes. Doesn't +// handle many errors, etc. Just for the sake of having something "live" +// to hit. +func StartMockFeedbackServer(certFile, keyFile string) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + log.Panic(err) + } + config := tls.Config{Certificates: []tls.Certificate{cert}, ClientAuth: tls.RequireAnyClientCert} + log.Print("- starting Mock Apple Feedback TCP server at 0.0.0.0:5555") + + srv, _ := tls.Listen("tcp", "0.0.0.0:5555", &config) + for { + conn, err := srv.Accept() + if err != nil { + log.Panic(err) + } + go loop(conn) + } +} + +// Writes binary data to the client in the same +// manner as the Apple service would. +// +// [4 bytes, 2 bytes, 32 bytes] = 38 bytes total +func loop(conn net.Conn) { + defer conn.Close() + for { + timeT := uint32(1368809290) // 2013-05-17 12:48:10 -0400 + token := "abcd1234efab5678abcd1234efab5678" + + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, timeT) + binary.Write(buf, binary.BigEndian, uint16(len(token))) + binary.Write(buf, binary.BigEndian, []byte(token)) + conn.Write(buf.Bytes()) + + dur, _ := time.ParseDuration("1s") + time.Sleep(dur) + } +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/push_notification.go b/Godeps/_workspace/src/github.com/anachronistic/apns/push_notification.go new file mode 100644 index 000000000..e6b58d575 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/push_notification.go @@ -0,0 +1,175 @@ +package apns + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "math/rand" + "strconv" + "time" +) + +// Push commands always start with command value 2. +const pushCommandValue = 2 + +// Your total notification payload cannot exceed 2 KB. +const MaxPayloadSizeBytes = 2048 + +// Every push notification gets a pseudo-unique identifier; +// this establishes the upper boundary for it. 
// Push commands always start with command value 2.
const pushCommandValue = 2

// MaxPayloadSizeBytes is Apple's limit on the total notification payload.
const MaxPayloadSizeBytes = 2048

// IdentifierUbound is the upper bound for the pseudo-unique notification
// identifier; Apple echoes the identifier back when reporting a problem.
const IdentifierUbound = 9999

// Item IDs and fixed lengths for the binary frame layout.
const (
	deviceTokenItemid            = 1
	payloadItemid                = 2
	notificationIdentifierItemid = 3
	expirationDateItemid         = 4
	priorityItemid               = 5
	deviceTokenLength            = 32
	notificationIdentifierLength = 4
	expirationDateLength         = 4
	priorityLength               = 1
)

// Payload contains the notification data for your request.
//
// Alert is an interface because APNS accepts either a plain string or a
// dictionary (represented here by AlertDictionary).
type Payload struct {
	Alert            interface{} `json:"alert,omitempty"`
	Badge            int         `json:"badge,omitempty"`
	Sound            string      `json:"sound,omitempty"`
	ContentAvailable int         `json:"content-available,omitempty"`
	Category         string      `json:"category,omitempty"`
}

// NewPayload creates and returns a Payload structure.
func NewPayload() *Payload {
	return new(Payload)
}

// AlertDictionary is the more complex notification payload.
//
// From the APN docs: "Use the ... alert dictionary in general only if you
// absolutely need to." It is suitable for specific localization needs.
type AlertDictionary struct {
	Body         string   `json:"body,omitempty"`
	ActionLocKey string   `json:"action-loc-key,omitempty"`
	LocKey       string   `json:"loc-key,omitempty"`
	LocArgs      []string `json:"loc-args,omitempty"`
	LaunchImage  string   `json:"launch-image,omitempty"`
}

// NewAlertDictionary creates and returns an AlertDictionary structure.
func NewAlertDictionary() *AlertDictionary {
	return new(AlertDictionary)
}

// PushNotification is the wrapper for the Payload.
// The frame length fields are computed in ToBytes() rather than stored here.
type PushNotification struct {
	Identifier  int32
	Expiry      uint32
	DeviceToken string
	payload     map[string]interface{}
	Priority    uint8
}

// NewPushNotification creates and returns a PushNotification with an
// initialized payload map, a pseudo-random identifier in
// [0, IdentifierUbound) and the default priority 10.
func NewPushNotification() (pn *PushNotification) {
	pn = new(PushNotification)
	pn.payload = make(map[string]interface{})
	pn.Identifier = rand.New(rand.NewSource(time.Now().UnixNano())).Int31n(IdentifierUbound)
	pn.Priority = 10
	return
}

// AddPayload sets the "aps" payload section of the request.
//
// A zero Badge is rewritten to -1: the `omitempty` tag would otherwise
// drop a zero badge entirely, while Apple treats -1 like 0 (it clears the
// badge without rejecting the notification). Still a hack, but a
// deliberate one — see the original library's commentary.
func (pn *PushNotification) AddPayload(p *Payload) {
	if p.Badge == 0 {
		p.Badge = -1
	}
	pn.Set("aps", p)
}

// Get returns the value of a payload key, if it exists.
func (pn *PushNotification) Get(key string) interface{} {
	return pn.payload[key]
}

// Set defines the value of a payload key.
func (pn *PushNotification) Set(key string, value interface{}) {
	pn.payload[key] = value
}

// PayloadJSON returns the current payload in JSON format.
func (pn *PushNotification) PayloadJSON() ([]byte, error) {
	return json.Marshal(pn.payload)
}

// PayloadString returns the current payload in string format.
func (pn *PushNotification) PayloadString() (string, error) {
	j, err := pn.PayloadJSON()
	return string(j), err
}

// ToBytes serializes the complete PushNotification into the binary frame
// format transmitted to the APN service: a command byte, a frame length,
// then the token/payload/identifier/expiry/priority items.
func (pn *PushNotification) ToBytes() ([]byte, error) {
	token, err := hex.DecodeString(pn.DeviceToken)
	if err != nil {
		return nil, err
	}
	if len(token) != deviceTokenLength {
		return nil, errors.New("device token has incorrect length")
	}
	payload, err := pn.PayloadJSON()
	if err != nil {
		return nil, err
	}
	if len(payload) > MaxPayloadSizeBytes {
		return nil, errors.New("payload is larger than the " + strconv.Itoa(MaxPayloadSizeBytes) + " byte limit")
	}

	frameBuffer := new(bytes.Buffer)
	binary.Write(frameBuffer, binary.BigEndian, uint8(deviceTokenItemid))
	binary.Write(frameBuffer, binary.BigEndian, uint16(deviceTokenLength))
	binary.Write(frameBuffer, binary.BigEndian, token)
	binary.Write(frameBuffer, binary.BigEndian, uint8(payloadItemid))
	binary.Write(frameBuffer, binary.BigEndian, uint16(len(payload)))
	binary.Write(frameBuffer, binary.BigEndian, payload)
	binary.Write(frameBuffer, binary.BigEndian, uint8(notificationIdentifierItemid))
	binary.Write(frameBuffer, binary.BigEndian, uint16(notificationIdentifierLength))
	binary.Write(frameBuffer, binary.BigEndian, pn.Identifier)
	binary.Write(frameBuffer, binary.BigEndian, uint8(expirationDateItemid))
	binary.Write(frameBuffer, binary.BigEndian, uint16(expirationDateLength))
	binary.Write(frameBuffer, binary.BigEndian, pn.Expiry)
	binary.Write(frameBuffer, binary.BigEndian, uint8(priorityItemid))
	binary.Write(frameBuffer, binary.BigEndian, uint16(priorityLength))
	binary.Write(frameBuffer, binary.BigEndian, pn.Priority)

	// was bytes.NewBuffer([]byte{}): new(bytes.Buffer) is the idiomatic
	// zero-value form with identical behavior.
	buffer := new(bytes.Buffer)
	binary.Write(buffer, binary.BigEndian, uint8(pushCommandValue))
	binary.Write(buffer, binary.BigEndian, uint32(frameBuffer.Len()))
	binary.Write(buffer, binary.BigEndian, frameBuffer.Bytes())
	return buffer.Bytes(), nil
}

// TimeoutSeconds is the maximum number of seconds we're willing to wait
// for a response from the Apple Push Notification Service.
const TimeoutSeconds = 5

// ApplePushResponses enumerates the status codes Apple defines for push
// notification attempts.
var ApplePushResponses = map[uint8]string{
	0:   "NO_ERRORS",
	1:   "PROCESSING_ERROR",
	2:   "MISSING_DEVICE_TOKEN",
	3:   "MISSING_TOPIC",
	4:   "MISSING_PAYLOAD",
	5:   "INVALID_TOKEN_SIZE",
	6:   "INVALID_TOPIC_SIZE",
	7:   "INVALID_PAYLOAD_SIZE",
	8:   "INVALID_TOKEN",
	10:  "SHUTDOWN",
	255: "UNKNOWN",
}

// PushNotificationResponse details what Apple had to say, if anything.
type PushNotificationResponse struct {
	Success       bool
	AppleResponse string
	Error         error
}
+func NewPushNotificationResponse() (resp *PushNotificationResponse) { + resp = new(PushNotificationResponse) + resp.Success = false + return +} diff --git a/Godeps/_workspace/src/github.com/anachronistic/apns/push_notification_test.go b/Godeps/_workspace/src/github.com/anachronistic/apns/push_notification_test.go new file mode 100644 index 000000000..a17b1c833 --- /dev/null +++ b/Godeps/_workspace/src/github.com/anachronistic/apns/push_notification_test.go @@ -0,0 +1,111 @@ +package apns + +import ( + "testing" +) + +const testDeviceToken = "e93b7686988b4b5fd334298e60e73d90035f6d12628a80b4029bde0dec514df9" + +// Create a new Payload that specifies simple text, +// a badge counter, and a custom notification sound. +func mockPayload() (payload *Payload) { + payload = NewPayload() + payload.Alert = "You have mail!" + payload.Badge = 42 + payload.Sound = "bingbong.aiff" + return +} + +// See the commentary in push_notification.go for information +// on why we're testing a badge of value 0. +func mockZeroBadgePayload() (payload *Payload) { + payload = mockPayload() + payload.Badge = 0 + return +} + +// Create a new AlertDictionary. Apple recommends you not use +// the more complex alert style unless absolutely necessary. +func mockAlertDictionary() (dict *AlertDictionary) { + args := make([]string, 1) + args[0] = "localized args" + + dict = NewAlertDictionary() + dict.Body = "Complex Message" + dict.ActionLocKey = "Play a Game!" 
+ dict.LocKey = "localized key" + dict.LocArgs = args + dict.LaunchImage = "image.jpg" + return +} + +func TestBasicAlert(t *testing.T) { + payload := mockPayload() + pn := NewPushNotification() + + pn.DeviceToken = testDeviceToken + pn.AddPayload(payload) + + bytes, _ := pn.ToBytes() + json, _ := pn.PayloadJSON() + if len(bytes) != 130 { + t.Error("expected 130 bytes; got", len(bytes)) + } + if len(json) != 69 { + t.Error("expected 69 bytes; got", len(json)) + } +} + +func TestAlertDictionary(t *testing.T) { + dict := mockAlertDictionary() + payload := mockPayload() + payload.Alert = dict + + pn := NewPushNotification() + pn.DeviceToken = testDeviceToken + pn.AddPayload(payload) + + bytes, _ := pn.ToBytes() + json, _ := pn.PayloadJSON() + if len(bytes) != 255 { + t.Error("expected 255 bytes; got", len(bytes)) + } + if len(json) != 194 { + t.Error("expected 194 bytes; got", len(bytes)) + } +} + +func TestCustomParameters(t *testing.T) { + payload := mockPayload() + pn := NewPushNotification() + + pn.DeviceToken = testDeviceToken + pn.AddPayload(payload) + pn.Set("foo", "bar") + + if pn.Get("foo") != "bar" { + t.Error("unable to set a custom property") + } + if pn.Get("not_set") != nil { + t.Error("expected a missing key to return nil") + } + + bytes, _ := pn.ToBytes() + json, _ := pn.PayloadJSON() + if len(bytes) != 142 { + t.Error("expected 110 bytes; got", len(bytes)) + } + if len(json) != 81 { + t.Error("expected 81 bytes; got", len(json)) + } +} + +func TestZeroBadgeChangesToNegativeOne(t *testing.T) { + payload := mockZeroBadgePayload() + pn := NewPushNotification() + pn.AddPayload(payload) + + if payload.Badge != -1 { + t.Error("expected 0 badge value to be converted to -1; got", payload.Badge) + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..ce4c5e27a --- /dev/null +++ 
// indexRe matches a trailing index expression on a path component:
// "Field[0]", "Field[-1]", or "Field[]" (wildcard).
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)

// rValuesAtPath walks a dotted lexical path (e.g. "A.B[0].C", with "x||y"
// meaning "first alternative that yields values wins") through v and
// returns the reflect.Values it reaches. When create is true, nil
// pointers along the way are allocated so the leaves can be written.
// Unexported (lowercase) components are rejected.
func rValuesAtPath(v interface{}, path string, create bool) []reflect.Value {
	pathparts := strings.Split(path, "||")
	if len(pathparts) > 1 {
		for _, pathpart := range pathparts {
			// simplified from `vals != nil && len(vals) > 0` (staticcheck S1009)
			if vals := rValuesAtPath(v, pathpart, create); len(vals) > 0 {
				return vals
			}
		}
		return nil
	}

	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
	components := strings.Split(path, ".")
	for len(values) > 0 && len(components) > 0 {
		var index *int64
		var indexStar bool
		c := strings.TrimSpace(components[0])
		if c == "" { // no actual component, illegal syntax
			return nil
		} else if c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
			// TODO normalize case for user
			return nil // don't support unexported fields
		}

		// Parse an optional trailing index expression off this component.
		if m := indexRe.FindStringSubmatch(c); m != nil {
			c = m[1]
			if m[2] == "" {
				index = nil
				indexStar = true
			} else {
				i, _ := strconv.ParseInt(m[2], 10, 32)
				index = &i
				indexStar = false
			}
		}

		nextvals := []reflect.Value{}
		for _, value := range values {
			// Pull the component name out of the struct member.
			if value.Kind() != reflect.Struct {
				continue
			}

			if c == "*" { // pull all members
				for i := 0; i < value.NumField(); i++ {
					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
						nextvals = append(nextvals, f)
					}
				}
				continue
			}

			value = value.FieldByName(c)
			if create && value.Kind() == reflect.Ptr && value.IsNil() {
				value.Set(reflect.New(value.Type().Elem()))
				value = value.Elem()
			} else {
				value = reflect.Indirect(value)
			}

			if value.IsValid() {
				nextvals = append(nextvals, value)
			}
		}
		values = nextvals

		if indexStar || index != nil {
			nextvals = []reflect.Value{}
			for _, value := range values {
				value := reflect.Indirect(value)
				if value.Kind() != reflect.Slice {
					continue
				}

				if indexStar { // grab all indices
					for i := 0; i < value.Len(); i++ {
						idx := reflect.Indirect(value.Index(i))
						if idx.IsValid() {
							nextvals = append(nextvals, idx)
						}
					}
					continue
				}

				// Resolve the explicit index, supporting negative indexing
				// from the end of the slice.
				i := int(*index)
				if i < 0 {
					i = value.Len() + i
				}
				if i < 0 || i >= value.Len() {
					// BUG FIX: the original fell through to Index(i) here,
					// panicking on a still-negative index (e.g. "[-5]" on a
					// 3-element slice) and in the create case. Out-of-range
					// indices now yield no values.
					// TODO: resize the slice when create is true.
					continue
				}
				value = reflect.Indirect(value.Index(i))

				if value.IsValid() {
					nextvals = append(nextvals, value)
				}
			}
			values = nextvals
		}

		components = components[1:]
	}
	return values
}

// ValuesAtPath returns a list of objects at the lexical path inside of a
// structure, or nil when the path is syntactically invalid.
func ValuesAtPath(i interface{}, path string) []interface{} {
	rvals := rValuesAtPath(i, path, false)
	if rvals == nil {
		return nil
	}
	vals := make([]interface{}, len(rvals))
	for n, rval := range rvals {
		vals[n] = rval.Interface()
	}
	return vals
}

// SetValueAtPath sets every object at the lexical path inside of a
// structure to v, allocating intermediate nil pointers as needed.
func SetValueAtPath(i interface{}, path string, v interface{}) {
	// Ranging over a nil result is a no-op, matching the original's
	// nil-guarded loop.
	for _, rval := range rValuesAtPath(i, path, true) {
		rval.Set(reflect.ValueOf(v))
	}
}
[]Struct{Struct{C: "value1"}, Struct{C: "value2"}, Struct{C: "value3"}}, + B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}}, + C: "initial", +} + +func TestValueAtPathSuccess(t *testing.T) { + assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "C")) + assert.Equal(t, []interface{}{"value1"}, awsutil.ValuesAtPath(data, "A[0].C")) + assert.Equal(t, []interface{}{"value2"}, awsutil.ValuesAtPath(data, "A[1].C")) + assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[2].C")) + assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[-1].C")) + assert.Equal(t, []interface{}{"value1", "value2", "value3"}, awsutil.ValuesAtPath(data, "A[].C")) + assert.Equal(t, []interface{}{"terminal"}, awsutil.ValuesAtPath(data, "B . B . C")) + assert.Equal(t, []interface{}{"terminal", "terminal2"}, awsutil.ValuesAtPath(data, "B.*.C")) + assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "A.D.X || C")) +} + +func TestValueAtPathFailure(t *testing.T) { + assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "C.x")) + assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, ".x")) + assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "X.Y.Z")) + assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[100].C")) + assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[3].C")) + assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "B.B.C.Z")) + assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "a[-1].C")) + assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(nil, "A.B.C")) +} + +func TestSetValueAtPathSuccess(t *testing.T) { + var s Struct + awsutil.SetValueAtPath(&s, "C", "test1") + awsutil.SetValueAtPath(&s, "B.B.C", "test2") + awsutil.SetValueAtPath(&s, "B.D.C", "test3") + assert.Equal(t, "test1", s.C) + assert.Equal(t, "test2", s.B.B.C) + assert.Equal(t, "test3", s.B.D.C) + + awsutil.SetValueAtPath(&s, "B.*.C", "test0") + assert.Equal(t, "test0", 
s.B.B.C) + assert.Equal(t, "test0", s.B.D.C) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 000000000..2e90f8da4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,88 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + 
stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..6cc6da42e --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/config.go @@ -0,0 +1,101 @@ +package aws + +import ( + "io" + "net/http" + "os" +) + +const DEFAULT_RETRIES = -1 + +var DefaultConfig = &Config{ + Credentials: DefaultCreds(), + Endpoint: "", + Region: os.Getenv("AWS_REGION"), + DisableSSL: false, + ManualSend: false, + HTTPClient: http.DefaultClient, + LogLevel: 0, + Logger: os.Stdout, + MaxRetries: DEFAULT_RETRIES, + DisableParamValidation: false, +} + +type Config struct { + Credentials CredentialsProvider + Endpoint string + Region string + DisableSSL bool + ManualSend bool + HTTPClient *http.Client + LogLevel uint + Logger io.Writer + MaxRetries int + DisableParamValidation bool +} + +func (c Config) Merge(newcfg *Config) *Config { + cfg := Config{} + + if newcfg != nil && newcfg.Credentials != nil { + cfg.Credentials = newcfg.Credentials + } else { + cfg.Credentials = c.Credentials + } + + if newcfg != nil && newcfg.Endpoint != "" { + cfg.Endpoint = newcfg.Endpoint + } else { + cfg.Endpoint = c.Endpoint + } + + if newcfg != nil && newcfg.Region != "" { + cfg.Region = newcfg.Region + } else { + cfg.Region = c.Region + } + + if newcfg != nil && newcfg.DisableSSL { + cfg.DisableSSL = newcfg.DisableSSL + } else { + cfg.DisableSSL = c.DisableSSL + } + + if newcfg != nil && newcfg.ManualSend { + cfg.ManualSend = newcfg.ManualSend + } else { + cfg.ManualSend = c.ManualSend + } + + if newcfg != nil && newcfg.HTTPClient != nil { + cfg.HTTPClient = 
newcfg.HTTPClient + } else { + cfg.HTTPClient = c.HTTPClient + } + + if newcfg != nil && newcfg.LogLevel != 0 { + cfg.LogLevel = newcfg.LogLevel + } else { + cfg.LogLevel = c.LogLevel + } + + if newcfg != nil && newcfg.Logger != nil { + cfg.Logger = newcfg.Logger + } else { + cfg.Logger = c.Logger + } + + if newcfg != nil && newcfg.MaxRetries != DEFAULT_RETRIES { + cfg.MaxRetries = newcfg.MaxRetries + } else { + cfg.MaxRetries = c.MaxRetries + } + + if newcfg != nil && newcfg.DisableParamValidation { + cfg.DisableParamValidation = newcfg.DisableParamValidation + } else { + cfg.DisableParamValidation = c.DisableParamValidation + } + + return &cfg +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials.go new file mode 100644 index 000000000..4490c674a --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials.go @@ -0,0 +1,288 @@ +package aws + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + "github.com/vaughan0/go-ini" +) + +var currentTime = time.Now + +// Credentials are used to authenticate and authorize calls that you make to +// AWS. +type Credentials struct { + AccessKeyID string + SecretAccessKey string + SessionToken string +} + +// A CredentialsProvider is a provider of credentials. +type CredentialsProvider interface { + // Credentials returns a set of credentials (or an error if no credentials + // could be provided). + Credentials() (*Credentials, error) +} + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = fmt.Errorf("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. 
+ ErrSecretAccessKeyNotFound = fmt.Errorf("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") +) + +type DefaultCredentialsProvider struct { +} + +func (p *DefaultCredentialsProvider) Credentials() (*Credentials, error) { + env, err := EnvCreds() + if err == nil { + return env.Credentials() + } + + profile, err := ProfileCreds("", "", 10*time.Minute) + if err == nil { + profileCreds, err := profile.Credentials() + if err == nil { + return profileCreds, nil + } + } + + return IAMCreds().Credentials() +} + +func DefaultCreds() CredentialsProvider { + return &DefaultCredentialsProvider{} +} + +// DetectCreds returns a CredentialsProvider based on the available information. +// +// If the access key ID and secret access key are provided, it returns a basic +// provider. +// +// If credentials are available via environment variables, it returns an +// environment provider. +// +// If a profile configuration file is available in the default location and has +// a default profile configured, it returns a profile provider. +// +// Otherwise, it returns an IAM instance provider. +func DetectCreds(accessKeyID, secretAccessKey, sessionToken string) CredentialsProvider { + if accessKeyID != "" && secretAccessKey != "" { + return Creds(accessKeyID, secretAccessKey, sessionToken) + } + + env, err := EnvCreds() + if err == nil { + return env + } + + profile, err := ProfileCreds("", "", 10*time.Minute) + if err != nil { + return IAMCreds() + } + + _, err = profile.Credentials() + if err != nil { + return IAMCreds() + } + + return profile +} + +// EnvCreds returns a static provider of AWS credentials from the process's +// environment, or an error if none are found. 
+func EnvCreds() (CredentialsProvider, error) { + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return nil, ErrAccessKeyIDNotFound + } + + if secret == "" { + return nil, ErrSecretAccessKeyNotFound + } + + return Creds(id, secret, os.Getenv("AWS_SESSION_TOKEN")), nil +} + +// Creds returns a static provider of credentials. +func Creds(accessKeyID, secretAccessKey, sessionToken string) CredentialsProvider { + return staticCredentialsProvider{ + creds: Credentials{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + }, + } +} + +// IAMCreds returns a provider which pulls credentials from the local EC2 +// instance's IAM roles. +func IAMCreds() CredentialsProvider { + return &iamProvider{} +} + +// ProfileCreds returns a provider which pulls credentials from the profile +// configuration file. 
+func ProfileCreds(filename, profile string, expiry time.Duration) (CredentialsProvider, error) { + if filename == "" { + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return nil, errors.New("User home directory not found.") + } + + filename = filepath.Join(homeDir, ".aws", "credentials") + } + + if profile == "" { + profile = "default" + } + + return &profileProvider{ + filename: filename, + profile: profile, + expiry: expiry, + }, nil +} + +type profileProvider struct { + filename string + profile string + expiry time.Duration + + creds Credentials + m sync.Mutex + expiration time.Time +} + +func (p *profileProvider) Credentials() (*Credentials, error) { + p.m.Lock() + defer p.m.Unlock() + + if p.expiration.After(currentTime()) { + return &p.creds, nil + } + + config, err := ini.LoadFile(p.filename) + if err != nil { + return nil, err + } + profile := config.Section(p.profile) + + accessKeyID, ok := profile["aws_access_key_id"] + if !ok { + return nil, fmt.Errorf("profile %s in %s did not contain aws_access_key_id", p.profile, p.filename) + } + + secretAccessKey, ok := profile["aws_secret_access_key"] + if !ok { + return nil, fmt.Errorf("profile %s in %s did not contain aws_secret_access_key", p.profile, p.filename) + } + + sessionToken := profile["aws_session_token"] + + p.creds = Credentials{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + } + p.expiration = currentTime().Add(p.expiry) + + return &p.creds, nil +} + +type iamProvider struct { + creds Credentials + m sync.Mutex + expiration time.Time +} + +var metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" + +// IAMClient is the HTTP client used to query the metadata endpoint for IAM +// credentials. 
+var IAMClient = http.Client{ + Timeout: 1 * time.Second, +} + +func (p *iamProvider) Credentials() (*Credentials, error) { + p.m.Lock() + defer p.m.Unlock() + + if p.expiration.After(currentTime()) { + return &p.creds, nil + } + + var body struct { + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + } + + resp, err := IAMClient.Get(metadataCredentialsEndpoint) + if err != nil { + return nil, fmt.Errorf("listing IAM credentials") + } + defer func() { + _ = resp.Body.Close() + }() + + // Take the first line of the body of the metadata endpoint + s := bufio.NewScanner(resp.Body) + if !s.Scan() { + return nil, fmt.Errorf("unable to find default IAM credentials") + } else if s.Err() != nil { + return nil, fmt.Errorf("%s listing IAM credentials", s.Err()) + } + + resp, err = IAMClient.Get(metadataCredentialsEndpoint + s.Text()) + if err != nil { + return nil, fmt.Errorf("getting %s IAM credentials", s.Text()) + } + defer func() { + _ = resp.Body.Close() + }() + + if err := json.NewDecoder(resp.Body).Decode(&body); err != nil { + return nil, fmt.Errorf("decoding %s IAM credentials", s.Text()) + } + + p.creds = Credentials{ + AccessKeyID: body.AccessKeyID, + SecretAccessKey: body.SecretAccessKey, + SessionToken: body.Token, + } + p.expiration = body.Expiration + + return &p.creds, nil +} + +type staticCredentialsProvider struct { + creds Credentials +} + +func (p staticCredentialsProvider) Credentials() (*Credentials, error) { + return &p.creds, nil +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials_test.go new file mode 100644 index 000000000..3143cebd6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/credentials_test.go @@ -0,0 +1,236 @@ +package aws + +import ( + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" +) + +func TestEnvCreds(t *testing.T) { + os.Clearenv() + 
os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + prov, err := EnvCreds() + if err != nil { + t.Fatal(err) + } + + creds, err := prov.Credentials() + if err != nil { + t.Fatal(err) + } + + if v, want := creds.AccessKeyID, "access"; v != want { + t.Errorf("Access key ID was %v, expected %v", v, want) + } + + if v, want := creds.SecretAccessKey, "secret"; v != want { + t.Errorf("Secret access key was %v, expected %v", v, want) + } + + if v, want := creds.SessionToken, "token"; v != want { + t.Errorf("Security token was %v, expected %v", v, want) + } +} + +func TestEnvCredsNoAccessKeyID(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + + prov, err := EnvCreds() + if err != ErrAccessKeyIDNotFound { + t.Fatalf("ErrAccessKeyIDNotFound expected, but was %#v/%#v", prov, err) + } +} + +func TestEnvCredsNoSecretAccessKey(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + + prov, err := EnvCreds() + if err != ErrSecretAccessKeyNotFound { + t.Fatalf("ErrSecretAccessKeyNotFound expected, but was %#v/%#v", prov, err) + } +} + +func TestEnvCredsAlternateNames(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY", "access") + os.Setenv("AWS_SECRET_KEY", "secret") + + prov, err := EnvCreds() + if err != nil { + t.Fatal(err) + } + + creds, err := prov.Credentials() + if err != nil { + t.Fatal(err) + } + + if v, want := creds.AccessKeyID, "access"; v != want { + t.Errorf("Access key ID was %v, expected %v", v, want) + } + + if v, want := creds.SecretAccessKey, "secret"; v != want { + t.Errorf("Secret access key was %v, expected %v", v, want) + } +} + +func TestIAMCreds(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/" { + fmt.Fprintln(w, "/creds") + } else { + fmt.Fprintln(w, `{ + "AccessKeyId" : "accessKey", + "SecretAccessKey" : "secret", + "Token" : 
"token", + "Expiration" : "2014-12-16T01:51:37Z" +}`) + } + })) + defer server.Close() + + defer func(s string) { + metadataCredentialsEndpoint = s + }(metadataCredentialsEndpoint) + metadataCredentialsEndpoint = server.URL + + defer func() { + currentTime = time.Now + }() + currentTime = func() time.Time { + return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + prov := IAMCreds() + creds, err := prov.Credentials() + if err != nil { + t.Fatal(err) + } + + if v, want := creds.AccessKeyID, "accessKey"; v != want { + t.Errorf("AcccessKeyID was %v, but expected %v", v, want) + } + + if v, want := creds.SecretAccessKey, "secret"; v != want { + t.Errorf("SecretAccessKey was %v, but expected %v", v, want) + } + + if v, want := creds.SessionToken, "token"; v != want { + t.Errorf("SessionToken was %v, but expected %v", v, want) + } +} + +func TestProfileCreds(t *testing.T) { + prov, err := ProfileCreds("example.ini", "", 10*time.Minute) + if err != nil { + t.Fatal(err) + } + + creds, err := prov.Credentials() + if err != nil { + t.Fatal(err) + } + + if v, want := creds.AccessKeyID, "accessKey"; v != want { + t.Errorf("AcccessKeyID was %v, but expected %v", v, want) + } + + if v, want := creds.SecretAccessKey, "secret"; v != want { + t.Errorf("SecretAccessKey was %v, but expected %v", v, want) + } + + if v, want := creds.SessionToken, "token"; v != want { + t.Errorf("SessionToken was %v, but expected %v", v, want) + } +} + +func TestProfileCredsWithoutToken(t *testing.T) { + prov, err := ProfileCreds("example.ini", "no_token", 10*time.Minute) + if err != nil { + t.Fatal(err) + } + + creds, err := prov.Credentials() + if err != nil { + t.Fatal(err) + } + + if v, want := creds.AccessKeyID, "accessKey"; v != want { + t.Errorf("AcccessKeyID was %v, but expected %v", v, want) + } + + if v, want := creds.SecretAccessKey, "secret"; v != want { + t.Errorf("SecretAccessKey was %v, but expected %v", v, want) + } + + if v, want := creds.SessionToken, ""; v != want { + 
t.Errorf("SessionToken was %v, but expected %v", v, want) + } +} + +func BenchmarkProfileCreds(b *testing.B) { + prov, err := ProfileCreds("example.ini", "", 10*time.Minute) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, err := prov.Credentials() + if err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkIAMCreds(b *testing.B) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/" { + fmt.Fprintln(w, "/creds") + } else { + fmt.Fprintln(w, `{ + "AccessKeyId" : "accessKey", + "SecretAccessKey" : "secret", + "Token" : "token", + "Expiration" : "2014-12-16T01:51:37Z" +}`) + } + })) + defer server.Close() + + defer func(s string) { + metadataCredentialsEndpoint = s + }(metadataCredentialsEndpoint) + metadataCredentialsEndpoint = server.URL + + defer func() { + currentTime = time.Now + }() + currentTime = func() time.Time { + return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + b.ResetTimer() + + prov := IAMCreds() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, err := prov.Credentials() + if err != nil { + b.Fatal(err) + } + } + }) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/error.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/error.go new file mode 100644 index 000000000..6b8989911 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/error.go @@ -0,0 +1,26 @@ +package aws + +import "time" + +// An APIError is an error returned by an AWS API. +type APIError struct { + StatusCode int // HTTP status code e.g. 
200 + Code string + Message string + RequestID string + Retryable bool + RetryDelay time.Duration + RetryCount uint +} + +func (e APIError) Error() string { + return e.Message +} + +func Error(e error) *APIError { + if err, ok := e.(APIError); ok { + return &err + } else { + return nil + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/example.ini b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/example.ini new file mode 100644 index 000000000..aa2dc506a --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/example.ini @@ -0,0 +1,8 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handler_functions.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handler_functions.go new file mode 100644 index 000000000..4de0f4a11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handler_functions.go @@ -0,0 +1,78 @@ +package aws + +import ( + "fmt" + "io" + "time" +) + +var sleepDelay = func(delay time.Duration) { + time.Sleep(delay) +} + +type lener interface { + Len() int +} + +func BuildContentLength(r *Request) { + if r.HTTPRequest.Header.Get("Content-Length") != "" { + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + cur, _ := body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(cur, 0) // make sure to seek back to original location + length = end - cur + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +} + +func UserAgentHandler(r *Request) { + r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion) +} + +func SendHandler(r 
*Request) { + r.HTTPResponse, r.Error = r.Service.Config.HTTPClient.Do(r.HTTPRequest) +} + +func ValidateResponseHandler(r *Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 400 { + err := APIError{ + StatusCode: r.HTTPResponse.StatusCode, + RetryCount: r.RetryCount, + } + r.Error = err + err.Retryable = r.Service.ShouldRetry(r) + err.RetryDelay = r.Service.RetryRules(r) + r.Error = err + } +} + +func AfterRetryHandler(r *Request) { + delay := 0 * time.Second + willRetry := false + + if err := Error(r.Error); err != nil { + delay = err.RetryDelay + if err.Retryable && r.RetryCount < r.Service.MaxRetries() { + r.RetryCount++ + willRetry = true + } + } + + if willRetry { + r.Error = nil + sleepDelay(delay) + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers.go new file mode 100644 index 000000000..f7c135fed --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers.go @@ -0,0 +1,65 @@ +package aws + +import "container/list" + +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +func (h *Handlers) copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Init() + h.Build.Init() + h.Send.Init() + h.Sign.Init() + h.Unmarshal.Init() + h.UnmarshalMeta.Init() + h.UnmarshalError.Init() + 
h.ValidateResponse.Init() + h.Retry.Init() + h.AfterRetry.Init() +} + +type HandlerList struct { + list.List +} + +func (l HandlerList) copy() HandlerList { + var n HandlerList + for e := l.Front(); e != nil; e = e.Next() { + h := e.Value.(func(*Request)) + n.PushBack(h) + } + return n +} + +func (l *HandlerList) Run(r *Request) { + for e := l.Front(); e != nil; e = e.Next() { + h := e.Value.(func(*Request)) + h(r) + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers_test.go new file mode 100644 index 000000000..89e87cc49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/handlers_test.go @@ -0,0 +1,24 @@ +package aws + +import "testing" + +func TestHandlerList(t *testing.T) { + r := &Request{} + l := HandlerList{} + l.PushBack(func(r *Request) { r.Data = Boolean(true) }) + l.Run(r) + if r.Data == nil { + t.Error("Expected handler to execute") + } +} + +func TestMultipleHandlers(t *testing.T) { + r := &Request{} + l := HandlerList{} + l.PushBack(func(r *Request) { r.Data = Boolean(true) }) + l.PushBack(func(r *Request) { r.Data = nil }) + l.Run(r) + if r.Data != nil { + t.Error("Expected handler to execute") + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator.go new file mode 100644 index 000000000..e1cb5cea5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator.go @@ -0,0 +1,80 @@ +package aws + +import ( + "fmt" + "reflect" + "strings" +) + +func ValidateParameters(r *Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = APIError{Code: "InvalidParameter", 
Message: msg} + } + } +} + +type validator struct { + errors []string +} + +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." + if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + notset := false + if f.Tag.Get("required") != "" { + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice: + if fvalue.IsNil() { + notset = true + } + default: + if !fvalue.IsValid() { + notset = true + } + } + } + + if notset { + msg := "missing required parameter: " + path + prefix + f.Name + v.errors = append(v.errors, msg) + } else { + v.validateAny(fvalue, path+prefix+f.Name) + } + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator_test.go new file mode 100644 index 000000000..08deca15a --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/param_validator_test.go @@ -0,0 +1,85 @@ +package aws_test + +import ( + "testing" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +var service = func() *aws.Service { + s := &aws.Service{ + Config: &aws.Config{}, + ServiceName: "mock-service", + APIVersion: "2015-01-01", + } + return s +}() + +type StructShape struct { + RequiredList []*ConditionalStructShape `required:"true"` + 
RequiredMap *map[string]*ConditionalStructShape `required:"true"` + RequiredBool *bool `required:"true"` + OptionalStruct *ConditionalStructShape + + hiddenParameter *string + + metadataStructureShape +} + +type metadataStructureShape struct { + SDKShapeTraits bool +} + +type ConditionalStructShape struct { + Name *string `required:"true"` + SDKShapeTraits bool +} + +func TestNoErrors(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{}, + RequiredMap: &map[string]*ConditionalStructShape{ + "key1": &ConditionalStructShape{Name: aws.String("Name")}, + "key2": &ConditionalStructShape{Name: aws.String("Name")}, + }, + RequiredBool: aws.Boolean(true), + OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")}, + } + + req := aws.NewRequest(service, &aws.Operation{}, input, nil) + aws.ValidateParameters(req) + assert.NoError(t, req.Error) +} + +func TestMissingRequiredParameters(t *testing.T) { + input := &StructShape{} + req := aws.NewRequest(service, &aws.Operation{}, input, nil) + aws.ValidateParameters(req) + err := aws.Error(req.Error) + + assert.Error(t, err) + assert.Equal(t, "InvalidParameter", err.Code) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", err.Message) +} + +func TestNestedMissingRequiredParameters(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{&ConditionalStructShape{}}, + RequiredMap: &map[string]*ConditionalStructShape{ + "key1": &ConditionalStructShape{Name: aws.String("Name")}, + "key2": &ConditionalStructShape{}, + }, + RequiredBool: aws.Boolean(true), + OptionalStruct: &ConditionalStructShape{}, + } + + req := aws.NewRequest(service, &aws.Operation{}, input, nil) + aws.ValidateParameters(req) + err := aws.Error(req.Error) + + assert.Error(t, err) + assert.Equal(t, "InvalidParameter", err.Code) + assert.Equal(t, "3 validation errors:\n- missing 
required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", err.Message) + +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request.go new file mode 100644 index 000000000..1c442b1cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request.go @@ -0,0 +1,149 @@ +package aws + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "time" +) + +type Request struct { + *Service + Handlers Handlers + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount uint + + built bool +} + +type Operation struct { + Name string + HTTPMethod string + HTTPPath string +} + +func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request { + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + httpReq.URL, _ = url.Parse(service.Endpoint + p) + + r := &Request{ + Service: service, + Handlers: service.Handlers.copy(), + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: nil, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = 
ioutil.NopCloser(reader) + r.Body = reader +} + +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.Sign() + if r.Error != nil { + return "", r.Error + } else { + return r.HTTPRequest.URL.String(), nil + } +} + +func (r *Request) Build() error { + if !r.built { + r.Error = nil + r.Handlers.Validate.Run(r) + if r.Error != nil { + return r.Error + } + r.Handlers.Build.Run(r) + r.built = true + } + + return r.Error +} + +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) Send() error { + r.Sign() + if r.Error != nil { + return r.Error + } + + for { + r.Handlers.Send.Run(r) + if r.Error != nil { + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + return r.Error + } + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + return r.Error + } + + return nil + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request_test.go new file mode 100644 index 000000000..b27b55067 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/request_test.go @@ -0,0 +1,118 @@ +package aws + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type testData struct { + Data string +} + +func body(str string) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(str))) +} + +func unmarshal(req *Request) { + defer req.HTTPResponse.Body.Close() + if req.Data != nil { + json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) + } + return +} + +func unmarshalError(req *Request) { + bodyBytes, err := 
ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = err + return + } + if len(bodyBytes) == 0 { + req.Error = APIError{ + StatusCode: req.HTTPResponse.StatusCode, + Message: req.HTTPResponse.Status, + } + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = err + return + } + req.Error = APIError{ + StatusCode: req.HTTPResponse.StatusCode, + Code: jsonErr.Code, + Message: jsonErr.Message, + } +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} + +func TestRequestRecoverRetry(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + http.Response{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + http.Response{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + http.Response{StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := NewService(&Config{MaxRetries: -1}) + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Init() // mock sending + s.Handlers.Send.PushBack(func(r *Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := NewRequest(s, &Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +func TestRequestExhaustRetries(t *testing.T) { + delays := []time.Duration{} + sleepDelay = func(delay time.Duration) { + delays = append(delays, delay) + } + + reqNum := 0 + reqs := []http.Response{ + http.Response{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + http.Response{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + http.Response{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + http.Response{StatusCode: 500, Body: 
body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + } + + s := NewService(&Config{MaxRetries: -1}) + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Init() // mock sending + s.Handlers.Send.PushBack(func(r *Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + r := NewRequest(s, &Operation{Name: "Operation"}, nil, nil) + err := r.Send() + apiErr := Error(err) + assert.NotNil(t, err) + assert.NotNil(t, apiErr) + assert.Equal(t, 500, apiErr.StatusCode) + assert.Equal(t, "UnknownError", apiErr.Code) + assert.Equal(t, "An error occurred.", apiErr.Message) + assert.Equal(t, 3, int(r.RetryCount)) + assert.True(t, reflect.DeepEqual([]time.Duration{30 * time.Millisecond, 60 * time.Millisecond, 120 * time.Millisecond}, delays)) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/service.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/service.go new file mode 100644 index 000000000..95d590b91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/service.go @@ -0,0 +1,142 @@ +package aws + +import ( + "fmt" + "math" + "net/http" + "net/http/httputil" + "regexp" + "time" + + "github.com/awslabs/aws-sdk-go/internal/endpoints" +) + +type Service struct { + Config *Config + Handlers Handlers + ManualSend bool + ServiceName string + APIVersion string + Endpoint string + JSONVersion string + TargetPrefix string + RetryRules func(*Request) time.Duration + ShouldRetry func(*Request) bool + DefaultMaxRetries uint +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +func NewService(config *Config) *Service { + svc := &Service{Config: config} + svc.Initialize() + return svc +} + +func (s *Service) Initialize() { + if s.Config == nil { + s.Config = &Config{} + } + if s.Config.HTTPClient == nil { + s.Config.HTTPClient = http.DefaultClient + } + + if s.RetryRules == nil { + s.RetryRules = retryRules + } + + if s.ShouldRetry == nil { + 
s.ShouldRetry = shouldRetry + } + + s.DefaultMaxRetries = 3 + s.Handlers.Build.PushBack(UserAgentHandler) + s.Handlers.Sign.PushBack(BuildContentLength) + s.Handlers.Send.PushBack(SendHandler) + s.Handlers.AfterRetry.PushBack(AfterRetryHandler) + s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler) + s.AddDebugHandlers() + s.buildEndpoint() + + if !s.Config.DisableParamValidation { + s.Handlers.Validate.PushBack(ValidateParameters) + } +} + +func (s *Service) buildEndpoint() { + if s.Config.Endpoint != "" { + s.Endpoint = s.Config.Endpoint + } else { + s.Endpoint = endpoints.EndpointForRegion(s.ServiceName, s.Config.Region) + } + + if !schemeRE.MatchString(s.Endpoint) { + scheme := "https" + if s.Config.DisableSSL { + scheme = "http" + } + s.Endpoint = scheme + "://" + s.Endpoint + } +} + +func (s *Service) AddDebugHandlers() { + out := s.Config.Logger + if s.Config.LogLevel == 0 { + return + } + + s.Handlers.Sign.PushBack(func(r *Request) { + dumpedBody, _ := httputil.DumpRequest(r.HTTPRequest, true) + + fmt.Fprintf(out, "=> [%s] %s.%s(%+v)\n", r.Time, + r.Service.ServiceName, r.Operation.Name, r.Params) + fmt.Fprintf(out, "---[ REQUEST PRE-SIGN ]------------------------------\n") + fmt.Fprintf(out, "%s\n", string(dumpedBody)) + fmt.Fprintf(out, "-----------------------------------------------------\n") + }) + s.Handlers.Send.PushFront(func(r *Request) { + dumpedBody, _ := httputil.DumpRequest(r.HTTPRequest, true) + + fmt.Fprintf(out, "---[ REQUEST POST-SIGN ]-----------------------------\n") + fmt.Fprintf(out, "%s\n", string(dumpedBody)) + fmt.Fprintf(out, "-----------------------------------------------------\n") + }) + s.Handlers.Send.PushBack(func(r *Request) { + fmt.Fprintf(out, "---[ RESPONSE ]--------------------------------------\n") + if r.HTTPResponse != nil { + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, true) + fmt.Fprintf(out, "%s\n", string(dumpedBody)) + } else if r.Error != nil { + fmt.Fprintf(out, "%s\n", r.Error) + } + 
fmt.Fprintf(out, "-----------------------------------------------------\n") + }) +} + +func (s *Service) MaxRetries() uint { + if s.Config.MaxRetries < 0 { + return s.DefaultMaxRetries + } else { + return uint(s.Config.MaxRetries) + } +} + +func retryRules(r *Request) time.Duration { + delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 30 + return delay * time.Millisecond +} + +func shouldRetry(r *Request) bool { + if err := Error(r.Error); err != nil { + if err.StatusCode >= 500 { + return true + } + + switch err.Code { + case "ExpiredTokenException": + case "ProvisionedThroughputExceededException", "Throttling": + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..5da09114c --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/types.go @@ -0,0 +1,63 @@ +package aws + +import ( + "io" + "time" +) + +// String converts a Go string into a string pointer. +func String(v string) *string { + return &v +} + +// Boolean converts a Go bool into a boolean pointer. +func Boolean(v bool) *bool { + return &v +} + +// Long converts a Go int64 into a long pointer. +func Long(v int64) *int64 { + return &v +} + +// Double converts a Go float64 into a double pointer. 
+func Double(v float64) *float64 { + return &v +} + +// Time converts a Go Time into a Time pointer +func Time(t time.Time) *time.Time { + return &t +} + +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +type ReaderSeekerCloser struct { + r io.Reader +} + +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..f673d470a --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/aws/version.go @@ -0,0 +1,5 @@ +// Package aws provides core functionality for making requests to AWS services. 
+package aws + +const SDKName = "aws-sdk-go" +const SDKVersion = "0.5.0" diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.go new file mode 100644 index 000000000..12e4fb529 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.go @@ -0,0 +1,24 @@ +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go + +import "strings" + +func EndpointForRegion(svcName, region string) string { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + return ep + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.json new file mode 100644 index 000000000..6c35090c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints.json @@ -0,0 +1,67 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com" + }, + 
"*/sts": { + "endpoint": "sts.amazonaws.com" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_map.go new file mode 100644 index 000000000..733d2bd28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_map.go @@ -0,0 +1,78 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": endpointEntry{ + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": endpointEntry{ + Endpoint: "cloudfront.amazonaws.com", + }, + "*/iam": endpointEntry{ + Endpoint: "iam.amazonaws.com", + }, + "*/importexport": endpointEntry{ + Endpoint: "importexport.amazonaws.com", + }, + "*/route53": endpointEntry{ + Endpoint: "route53.amazonaws.com", + }, + "*/sts": endpointEntry{ + Endpoint: "sts.amazonaws.com", + }, + "ap-northeast-1/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": endpointEntry{ + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": endpointEntry{ + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": endpointEntry{ + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": endpointEntry{ + Endpoint: "sdb.amazonaws.com", + }, + "us-gov-west-1/iam": endpointEntry{ + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": endpointEntry{ + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": endpointEntry{ + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_test.go 
b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_test.go new file mode 100644 index 000000000..84efb893e --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/endpoints/endpoints_test.go @@ -0,0 +1,25 @@ +package endpoints + +import "testing" + +func TestGlobalEndpoints(t *testing.T) { + region := "mock-region-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts"} + + for _, name := range svcs { + if EndpointForRegion(name, region) != name+".amazonaws.com" { + t.Errorf("expected endpoint for %s to equal %s.amazonaws.com", name, name) + } + } +} + +func TestServicesInCN(t *testing.T) { + region := "cn-north-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3"} + + for _, name := range svcs { + if EndpointForRegion(name, region) != name+"."+region+".amazonaws.com.cn" { + t.Errorf("expected endpoint for %s to equal %s.%s.amazonaws.com.cn", name, name, region) + } + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build.go new file mode 100644 index 000000000..74b721658 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build.go @@ -0,0 +1,30 @@ +package query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go + +import ( + "net/url" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/query/queryutil" +) + +func Build(r *aws.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.Service.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = err + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + 
r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build_test.go new file mode 100644 index 000000000..bbba7b4cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/build_test.go @@ -0,0 +1,1167 @@ +package query_test + +import ( + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/query" + "github.com/awslabs/aws-sdk-go/internal/signer/v4" + + "bytes" + "encoding/json" + "encoding/xml" + "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/awslabs/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} + +// InputService1ProtocolTest is a client for InputService1ProtocolTest. +type InputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService1ProtocolTest client. 
+func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice1protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService1ProtocolTest{service} +} + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + if opInputService1TestCaseOperation1 == nil { + opInputService1TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService1TestCaseOperation1, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (output *InputService1TestShapeInputService1TestCaseOperation1Output, err error) { + req, out := c.InputService1TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService1TestCaseOperation1 *aws.Operation + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputShape struct 
{ + Bar *string `type:"string"` + + Foo *string `type:"string"` + + metadataInputService1TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService1TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService2ProtocolTest is a client for InputService2ProtocolTest. +type InputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService2ProtocolTest client. +func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice2protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService2ProtocolTest{service} +} + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + if opInputService2TestCaseOperation1 == nil { + opInputService2TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService2TestCaseOperation1, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (output *InputService2TestShapeInputService2TestCaseOperation1Output, err error) { + req, out := c.InputService2TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService2TestCaseOperation1 *aws.Operation + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeInputShape struct { + StructArg *InputService2TestShapeStructType `type:"structure"` + + metadataInputService2TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService2TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeStructType struct { + ScalarArg *string `type:"string"` + + metadataInputService2TestShapeStructType `json:"-", xml:"-"` +} + +type metadataInputService2TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService3ProtocolTest is a client for InputService3ProtocolTest. +type InputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService3ProtocolTest client. 
+func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice3protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService3ProtocolTest{service} +} + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + if opInputService3TestCaseOperation1 == nil { + opInputService3TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService3TestCaseOperation1, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (output *InputService3TestShapeInputService3TestCaseOperation1Output, err error) { + req, out := c.InputService3TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService3TestCaseOperation1 *aws.Operation + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputShape struct 
{ + ListArg []*string `type:"list"` + + metadataInputService3TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService3TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService4ProtocolTest is a client for InputService4ProtocolTest. +type InputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService4ProtocolTest client. +func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice4protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService4ProtocolTest{service} +} + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + if opInputService4TestCaseOperation1 == nil { + opInputService4TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService4TestCaseOperation1, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (output *InputService4TestShapeInputService4TestCaseOperation1Output, err error) { + req, out := c.InputService4TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService4TestCaseOperation1 *aws.Operation + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputShape struct { + ListArg []*string `type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` + + metadataInputService4TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService4TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService5ProtocolTest is a client for InputService5ProtocolTest. +type InputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService5ProtocolTest client. 
+func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice5protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService5ProtocolTest{service} +} + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + if opInputService5TestCaseOperation1 == nil { + opInputService5TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService5TestCaseOperation1, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (output *InputService5TestShapeInputService5TestCaseOperation1Output, err error) { + req, out := c.InputService5TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService5TestCaseOperation1 *aws.Operation + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputShape struct 
{ + MapArg *map[string]*string `type:"map"` + + metadataInputService5TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService5TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService6ProtocolTest is a client for InputService6ProtocolTest. +type InputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService6ProtocolTest client. +func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice6protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService6ProtocolTest{service} +} + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + if opInputService6TestCaseOperation1 == nil { + opInputService6TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService6TestCaseOperation1, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (output *InputService6TestShapeInputService6TestCaseOperation1Output, err error) { + req, out := c.InputService6TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService6TestCaseOperation1 *aws.Operation + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6TestShapeInputShape struct { + BlobArg []byte `type:"blob"` + + metadataInputService6TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService6TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService7ProtocolTest is a client for InputService7ProtocolTest. +type InputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService7ProtocolTest client. 
+func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice7protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService7ProtocolTest{service} +} + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + if opInputService7TestCaseOperation1 == nil { + opInputService7TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService7TestCaseOperation1, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (output *InputService7TestShapeInputService7TestCaseOperation1Output, err error) { + req, out := c.InputService7TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService7TestCaseOperation1 *aws.Operation + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7TestShapeInputShape struct 
{ + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataInputService7TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService7TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService8ProtocolTest is a client for InputService8ProtocolTest. +type InputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService8ProtocolTest client. +func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice8protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService8ProtocolTest{service} +} + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + if opInputService8TestCaseOperation1 == nil { + opInputService8TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService8TestCaseOperation1, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestCaseOperation1Output, err error) { + req, out := c.InputService8TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService8TestCaseOperation1 *aws.Operation + +// InputService8TestCaseOperation2Request generates a request for the InputService8TestCaseOperation2 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation2Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation2Output) { + if opInputService8TestCaseOperation2 == nil { + opInputService8TestCaseOperation2 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService8TestCaseOperation2, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation2(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestCaseOperation2Output, err error) { + req, out := c.InputService8TestCaseOperation2Request(input) + output = out + err = req.Send() + return +} + +var opInputService8TestCaseOperation2 *aws.Operation + +// InputService8TestCaseOperation3Request generates a request for the InputService8TestCaseOperation3 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation3Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation3Output) { + if opInputService8TestCaseOperation3 == nil { + opInputService8TestCaseOperation3 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService8TestCaseOperation3, input, output) + output = &InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation3(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation3Output, err error) { + req, out := c.InputService8TestCaseOperation3Request(input) + output = out + err = req.Send() + return +} + +var opInputService8TestCaseOperation3 *aws.Operation + +// InputService8TestCaseOperation4Request generates a request for the InputService8TestCaseOperation4 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation4Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation4Output) { + if opInputService8TestCaseOperation4 == nil { + opInputService8TestCaseOperation4 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService8TestCaseOperation4, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation4(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestCaseOperation4Output, err error) { + req, out := c.InputService8TestCaseOperation4Request(input) + output = out + err = req.Send() + return +} + +var opInputService8TestCaseOperation4 *aws.Operation + +// InputService8TestCaseOperation5Request generates a request for the InputService8TestCaseOperation5 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation5Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation5Output) { + if opInputService8TestCaseOperation5 == nil { + opInputService8TestCaseOperation5 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService8TestCaseOperation5, input, output) + output = &InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation5(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation5Output, err error) { + req, out := c.InputService8TestCaseOperation5Request(input) + output = out + err = req.Send() + return +} + +var opInputService8TestCaseOperation5 *aws.Operation + +// InputService8TestCaseOperation6Request generates a request for the InputService8TestCaseOperation6 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation6Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation6Output) { + if opInputService8TestCaseOperation6 == nil { + opInputService8TestCaseOperation6 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opInputService8TestCaseOperation6, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation6(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestCaseOperation6Output, err error) { + req, out := c.InputService8TestCaseOperation6Request(input) + output = out + err = req.Send() + return +} + +var opInputService8TestCaseOperation6 *aws.Operation + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestCaseOperation2Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation2Output `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestCaseOperation4Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation4Output `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation4Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestCaseOperation6Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation6Output `json:"-", xml:"-"` +} + +type 
metadataInputService8TestShapeInputService8TestCaseOperation6Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation3Output struct { + metadataInputService8TestShapeInputService8TestShapeInputService8TestCaseOperation3Output `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestShapeInputService8TestCaseOperation3Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestShapeInputService8TestCaseOperation5Output struct { + metadataInputService8TestShapeInputService8TestShapeInputService8TestCaseOperation5Output `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestShapeInputService8TestCaseOperation5Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestShapeRecursiveStructType struct { + NoRecurse *string `type:"string"` + + RecursiveList []*InputService8TestShapeInputService8TestShapeRecursiveStructType `type:"list"` + + RecursiveMap *map[string]*InputService8TestShapeInputService8TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService8TestShapeInputService8TestShapeRecursiveStructType `type:"structure"` + + metadataInputService8TestShapeInputService8TestShapeRecursiveStructType `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestShapeRecursiveStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputShape struct { + RecursiveStruct *InputService8TestShapeInputService8TestShapeRecursiveStructType `type:"structure"` + + metadataInputService8TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService8TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + svc := NewInputService1ProtocolTest(nil) + svc.Endpoint = 
"https://test" + + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { + svc := NewInputService2ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService2TestShapeInputShape{ + StructArg: &InputService2TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { + svc := NewInputService3ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`), 
util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { + svc := NewInputService4ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeMapTypeCase1(t *testing.T) { + svc := NewInputService5ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService5TestShapeInputShape{ + MapArg: &map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + svc := NewInputService6ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService6TestShapeInputShape{ + BlobArg: []byte("foo"), + } + req, _ := 
svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestTimestampValuesCase1(t *testing.T) { + svc := NewInputService7ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService7TestShapeInputShape{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestRecursiveShapesCase1(t *testing.T) { + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestRecursiveShapesCase2(t *testing.T) 
{ + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService8TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestRecursiveShapesCase3(t *testing.T) { + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService8TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestRecursiveShapesCase4(t *testing.T) { + svc 
:= NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveList: []*InputService8TestShapeInputService8TestShapeRecursiveStructType{ + &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService8TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestRecursiveShapesCase5(t *testing.T) { + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveList: []*InputService8TestShapeInputService8TestShapeRecursiveStructType{ + &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService8TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, 
util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestRecursiveShapesCase6(t *testing.T) { + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + RecursiveStruct: &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + RecursiveMap: &map[string]*InputService8TestShapeInputService8TestShapeRecursiveStructType{ + "bar": &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + "foo": &InputService8TestShapeInputService8TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService8TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=bar&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=bar&RecursiveStruct.RecursiveMap.entry.2.key=foo&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..fe8850902 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go @@ -0,0 +1,198 @@ +package queryutil + +import ( + "encoding/base64" + 
"fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + value := elemOf(value.Field(i)) + field := t.Field(i) + var name string + + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, value, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + ".key" + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + ".key" + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + ".value" + } else { + valueName = prefix + "." 
+ strconv.Itoa(i+1) + ".value" + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + v.Set(name, base64.StdEncoding.EncodeToString(value)) + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal.go new file mode 100644 index 000000000..92a740dc4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal.go @@ -0,0 +1,26 @@ +package query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil" +) + +func Unmarshal(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = err + return + } + } +} + +func UnmarshalMeta(r *aws.Request) { + // TODO implement unmarshaling of request IDs +} diff --git 
a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..cf82eef93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_error.go @@ -0,0 +1,31 @@ +package query + +import ( + "encoding/xml" + "io" + + "github.com/awslabs/aws-sdk-go/aws" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +func UnmarshalError(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = err + } else { + r.Error = aws.APIError{ + StatusCode: r.HTTPResponse.StatusCode, + Code: resp.Code, + Message: resp.Message, + } + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_test.go new file mode 100644 index 000000000..8d67b2dac --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/query/unmarshal_test.go @@ -0,0 +1,1361 @@ +package query_test + +import ( + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/query" + "github.com/awslabs/aws-sdk-go/internal/signer/v4" + + "bytes" + "encoding/json" + "encoding/xml" + "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/awslabs/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard 
+var _ = util.Trim("") +var _ = url.Values{} + +// OutputService1ProtocolTest is a client for OutputService1ProtocolTest. +type OutputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService1ProtocolTest client. +func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice1protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService1ProtocolTest{service} +} + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputService1TestShapeOutputShape) { + if opOutputService1TestCaseOperation1 == nil { + opOutputService1TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService1TestCaseOperation1, input, output) + output = &OutputService1TestShapeOutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (output *OutputService1TestShapeOutputService1TestShapeOutputShape, err error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService1TestCaseOperation1 *aws.Operation + +type 
OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestShapeOutputShape struct { + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` + + metadataOutputService1TestShapeOutputService1TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService2ProtocolTest is a client for OutputService2ProtocolTest. +type OutputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService2ProtocolTest client. +func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice2protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService2ProtocolTest{service} +} + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) { + if opOutputService2TestCaseOperation1 == nil { + opOutputService2TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService2TestCaseOperation1, input, output) + output = &OutputService2TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (output *OutputService2TestShapeOutputShape, err error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService2TestCaseOperation1 *aws.Operation + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2TestShapeOutputShape struct { + Num *int64 `type:"integer"` + + Str *string `type:"string"` + + metadataOutputService2TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService2TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService3ProtocolTest is a client for OutputService3ProtocolTest. +type OutputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService3ProtocolTest client. 
+func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice3protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService3ProtocolTest{service} +} + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) { + if opOutputService3TestCaseOperation1 == nil { + opOutputService3TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService3TestCaseOperation1, input, output) + output = &OutputService3TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (output *OutputService3TestShapeOutputShape, err error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService3TestCaseOperation1 *aws.Operation + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3TestShapeOutputShape struct { + Blob 
[]byte `type:"blob"` + + metadataOutputService3TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService3TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService4ProtocolTest is a client for OutputService4ProtocolTest. +type OutputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService4ProtocolTest client. +func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice4protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService4ProtocolTest{service} +} + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) { + if opOutputService4TestCaseOperation1 == nil { + opOutputService4TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService4TestCaseOperation1, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (output *OutputService4TestShapeOutputShape, err error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService4TestCaseOperation1 *aws.Operation + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + ListMember []*string `type:"list"` + + metadataOutputService4TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService4TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService5ProtocolTest is a client for OutputService5ProtocolTest. +type OutputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService5ProtocolTest client. 
+func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice5protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService5ProtocolTest{service} +} + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) { + if opOutputService5TestCaseOperation1 == nil { + opOutputService5TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService5TestCaseOperation1, input, output) + output = &OutputService5TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (output *OutputService5TestShapeOutputShape, err error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService5TestCaseOperation1 *aws.Operation + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5TestShapeOutputShape struct { + 
ListMember []*string `locationNameList:"item" type:"list"` + + metadataOutputService5TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService5TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService6ProtocolTest is a client for OutputService6ProtocolTest. +type OutputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService6ProtocolTest client. +func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice6protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService6ProtocolTest{service} +} + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) { + if opOutputService6TestCaseOperation1 == nil { + opOutputService6TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService6TestCaseOperation1, input, output) + output = &OutputService6TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (output *OutputService6TestShapeOutputShape, err error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService6TestCaseOperation1 *aws.Operation + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeOutputShape struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService6TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService6TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService7ProtocolTest is a client for OutputService7ProtocolTest. +type OutputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService7ProtocolTest client. 
+func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice7protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService7ProtocolTest{service} +} + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) { + if opOutputService7TestCaseOperation1 == nil { + opOutputService7TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService7TestCaseOperation1, input, output) + output = &OutputService7TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (output *OutputService7TestShapeOutputShape, err error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService7TestCaseOperation1 *aws.Operation + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7TestShapeOutputShape struct { + 
ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService7TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService7TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService8ProtocolTest is a client for OutputService8ProtocolTest. +type OutputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService8ProtocolTest client. +func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice8protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService8ProtocolTest{service} +} + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) { + if opOutputService8TestCaseOperation1 == nil { + opOutputService8TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService8TestCaseOperation1, input, output) + output = &OutputService8TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (output *OutputService8TestShapeOutputShape, err error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService8TestCaseOperation1 *aws.Operation + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeOutputShape struct { + List []*OutputService8TestShapeStructureShape `type:"list"` + + metadataOutputService8TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService8TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeStructureShape struct { + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` + + metadataOutputService8TestShapeStructureShape `json:"-", xml:"-"` +} + +type metadataOutputService8TestShapeStructureShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService9ProtocolTest is a client for OutputService9ProtocolTest. +type OutputService9ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService9ProtocolTest client. 
+func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice9protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService9ProtocolTest{service} +} + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) { + if opOutputService9TestCaseOperation1 == nil { + opOutputService9TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService9TestCaseOperation1, input, output) + output = &OutputService9TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (output *OutputService9TestShapeOutputShape, err error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService9TestCaseOperation1 *aws.Operation + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeOutputShape struct { + List 
[]*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` + + metadataOutputService9TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService9TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeStructureShape struct { + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` + + metadataOutputService9TestShapeStructureShape `json:"-", xml:"-"` +} + +type metadataOutputService9TestShapeStructureShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService10ProtocolTest is a client for OutputService10ProtocolTest. +type OutputService10ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService10ProtocolTest client. +func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice10protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService10ProtocolTest{service} +} + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) { + if opOutputService10TestCaseOperation1 == nil { + opOutputService10TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService10TestCaseOperation1, input, output) + output = &OutputService10TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (output *OutputService10TestShapeOutputShape, err error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService10TestCaseOperation1 *aws.Operation + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService10TestShapeOutputShape struct { + List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` + + metadataOutputService10TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService10TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService11ProtocolTest is a client for OutputService11ProtocolTest. +type OutputService11ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService11ProtocolTest client. 
+func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice11protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService11ProtocolTest{service} +} + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) { + if opOutputService11TestCaseOperation1 == nil { + opOutputService11TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService11TestCaseOperation1, input, output) + output = &OutputService11TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (output *OutputService11TestShapeOutputShape, err error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService11TestCaseOperation1 *aws.Operation + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type 
OutputService11TestShapeOutputShape struct { + Map *map[string]*OutputService11TestShapeStructType `type:"map"` + + metadataOutputService11TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService11TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService11TestShapeStructType struct { + Foo *string `locationName:"foo" type:"string"` + + metadataOutputService11TestShapeStructType `json:"-", xml:"-"` +} + +type metadataOutputService11TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService12ProtocolTest is a client for OutputService12ProtocolTest. +type OutputService12ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService12ProtocolTest client. +func NewOutputService12ProtocolTest(config *aws.Config) *OutputService12ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice12protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService12ProtocolTest{service} +} + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. 
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *aws.Request, output *OutputService12TestShapeOutputShape) { + if opOutputService12TestCaseOperation1 == nil { + opOutputService12TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService12TestCaseOperation1, input, output) + output = &OutputService12TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (output *OutputService12TestShapeOutputShape, err error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService12TestCaseOperation1 *aws.Operation + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService12TestShapeOutputShape struct { + Map *map[string]*string `type:"map" flattened:"true"` + + metadataOutputService12TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService12TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService13ProtocolTest is a client for OutputService13ProtocolTest. +type OutputService13ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService13ProtocolTest client. 
+func NewOutputService13ProtocolTest(config *aws.Config) *OutputService13ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice13protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService13ProtocolTest{service} +} + +// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation. +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *aws.Request, output *OutputService13TestShapeOutputShape) { + if opOutputService13TestCaseOperation1 == nil { + opOutputService13TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService13TestCaseOperation1, input, output) + output = &OutputService13TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (output *OutputService13TestShapeOutputShape, err error) { + req, out := c.OutputService13TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService13TestCaseOperation1 *aws.Operation + +type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { + metadataOutputService13TestShapeOutputService13TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService13TestShapeOutputService13TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type 
OutputService13TestShapeOutputShape struct { + Map *map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + metadataOutputService13TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService13TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService14ProtocolTest is a client for OutputService14ProtocolTest. +type OutputService14ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService14ProtocolTest client. +func NewOutputService14ProtocolTest(config *aws.Config) *OutputService14ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice14protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService14ProtocolTest{service} +} + +// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation. 
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *aws.Request, output *OutputService14TestShapeOutputShape) { + if opOutputService14TestCaseOperation1 == nil { + opOutputService14TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService14TestCaseOperation1, input, output) + output = &OutputService14TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (output *OutputService14TestShapeOutputShape, err error) { + req, out := c.OutputService14TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService14TestCaseOperation1 *aws.Operation + +type OutputService14TestShapeOutputService14TestCaseOperation1Input struct { + metadataOutputService14TestShapeOutputService14TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService14TestShapeOutputService14TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService14TestShapeOutputShape struct { + Map *map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` + + metadataOutputService14TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService14TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + svc := NewOutputService1ProtocolTest(nil) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + 
query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, 200, *out.Long) + assert.Equal(t, 123, *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { + svc := NewOutputService2ProtocolTest(nil) + + buf := bytes.NewReader([]byte("mynamerequest-id")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "myname", *out.Str) + +} + +func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { + svc := NewOutputService3ProtocolTest(nil) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + svc := NewOutputService4ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, 
Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + svc := NewOutputService5ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { + svc := NewOutputService6ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) { + svc := NewOutputService7ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abcrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set 
headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + +} + +func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { + svc := NewOutputService8ProtocolTest(nil) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { + svc := NewOutputService9ProtocolTest(nil) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, 
"secondfoo", *out.List[1].Foo) + +} + +func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + svc := NewOutputService10ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abrequestid")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.List[0]) + assert.Equal(t, "b", *out.List[1]) + +} + +func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { + svc := NewOutputService11ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *(*out.Map)["baz"].Foo) + assert.Equal(t, "bar", *(*out.Map)["qux"].Foo) + +} + +func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { + svc := NewOutputService12ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *(*out.Map)["baz"]) + assert.Equal(t, "bar", *(*out.Map)["qux"]) + +} + +func 
TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { + svc := NewOutputService13ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarrequestid")) + req, out := svc.OutputService13TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bar", *(*out.Map)["qux"]) + +} + +func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { + svc := NewOutputService14ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService14TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *(*out.Map)["baz"]) + assert.Equal(t, "bar", *(*out.Map)["qux"]) + +} + diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/build.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/build.go new file mode 100644 index 000000000..3ff667d94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/build.go @@ -0,0 +1,215 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/awslabs/aws-sdk-go/aws" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +func Build(r *aws.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + 
buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *aws.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + switch field.Tag.Get("location") { + case "headers": // header maps + buildHeaderMap(r, m, field.Tag.Get("locationName")) + case "header": + buildHeader(r, m, name) + case "uri": + buildURI(r, m, name) + case "querystring": + buildQueryString(r, m, name, query) + } + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *aws.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetBufferBody([]byte(reader)) + default: + r.Error = fmt.Errorf("unknown payload type %s", payload.Type()) + } + } + } + } + } +} + +func buildHeader(r *aws.Request, v reflect.Value, name string) { + str, err := convertType(v) + if err != nil { + r.Error = err + } else if str != nil { + r.HTTPRequest.Header.Add(name, *str) + } +} + +func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + + if 
err != nil { + r.Error = err + } else if str != nil { + r.HTTPRequest.Header.Add(prefix+key.String(), *str) + } + } +} + +func buildURI(r *aws.Request, v reflect.Value, name string) { + value, err := convertType(v) + if err != nil { + r.Error = err + } else if value != nil { + uri := r.HTTPRequest.URL.Path + uri = strings.Replace(uri, "{"+name+"}", escapePath(*value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", escapePath(*value, false), -1) + r.HTTPRequest.URL.Path = uri + } +} + +func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) { + str, err := convertType(v) + if err != nil { + r.Error = err + } else if str != nil { + query.Set(name, *str) + } +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + // clean up path + urlPath = path.Clean(urlPath) + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool +var noEscapeInitialized = false + +// initialise noEscape +func initNoEscape() { + for i := range noEscape { + // Amazon expects every character except these escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' 
||
+		i == '_' ||
+		i == '~'
+	}
+}
+
+// escapePath escapes part of a URL path in Amazon style: every byte is
+// percent-encoded except the unreserved characters (A-Z, a-z, 0-9, '-',
+// '.', '_', '~') and, when encodeSep is false, the path separator '/'.
+// NOTE(review): the lazy noEscapeInitialized initialization below is not
+// goroutine-safe; an init() or sync.Once would remove the race — confirm
+// whether escapePath can be reached concurrently before relying on it.
+func escapePath(path string, encodeSep bool) string {
+	if !noEscapeInitialized {
+		initNoEscape()
+		noEscapeInitialized = true
+	}
+
+	var buf bytes.Buffer
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if noEscape[c] || (c == '/' && !encodeSep) {
+			buf.WriteByte(c)
+		} else {
+			// Percent-encode as "%XX" with upper-case hex digits.
+			buf.WriteByte('%')
+			buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
+		}
+	}
+	return buf.String()
+}
+
+// convertType renders v (after dereferencing pointers) in the string form
+// used for headers, URIs and query strings: []byte is base64-encoded,
+// time.Time is formatted with RFC822 in UTC, and bool/int64/float64 use
+// strconv. It returns (nil, nil) for an invalid (nil) value and an error
+// for any other type.
+func convertType(v reflect.Value) (*string, error) {
+	v = reflect.Indirect(v)
+	if !v.IsValid() {
+		return nil, nil
+	}
+
+	var str string
+	switch value := v.Interface().(type) {
+	case string:
+		str = value
+	case []byte:
+		str = base64.StdEncoding.EncodeToString(value)
+	case bool:
+		str = strconv.FormatBool(value)
+	case int64:
+		str = strconv.FormatInt(value, 10)
+	case float64:
+		str = strconv.FormatFloat(value, 'f', -1, 64)
+	case time.Time:
+		str = value.UTC().Format(RFC822)
+	default:
+		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		return nil, err
+	}
+	return &str, nil
+}
diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/payload.go
new file mode 100644
index 000000000..1e15f66f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/payload.go
@@ -0,0 +1,43 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the struct member declared as the payload via the
+// SDKShapeTraits `payload` tag, but only when that member is itself tagged
+// type:"structure". It returns nil when i is nil, when no payload is
+// declared, or when the payload field is a nil pointer.
+func PayloadMember(i interface{}) interface{} {
+	if i == nil {
+		return nil
+	}
+
+	v := reflect.ValueOf(i).Elem()
+	if !v.IsValid() {
+		return nil
+	}
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			field, _ := v.Type().FieldByName(payloadName)
+			if field.Tag.Get("type") != "structure" {
+				return nil
+			}
+
+			payload := v.FieldByName(payloadName)
+			// BUG FIX: the condition used to be
+			//   payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil())
+			// whose right-hand side is dead code (Kind() reports Invalid
+			// whenever IsValid() is false), so a valid-but-nil *struct payload
+			// was returned as a non-nil interface holding a typed nil. Skip
+			// nil pointers instead so callers' nil checks behave as expected.
+			if payload.IsValid() && !(payload.Kind() == reflect.Ptr && payload.IsNil()) {
+				return payload.Interface()
+			}
+		}
+	}
+	return nil
+}
+
+// PayloadType returns the `type` tag of the member named by the
+// SDKShapeTraits `payload` tag, or "" when no payload is declared.
+func PayloadType(i interface{}) string {
+	v := reflect.Indirect(reflect.ValueOf(i))
+	if !v.IsValid() {
+		return ""
+	}
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			if member, ok := v.Type().FieldByName(payloadName); ok {
+				return member.Tag.Get("type")
+			}
+		}
+	}
+	return ""
+}
diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/unmarshal.go
new file mode 100644
index 000000000..d0e216c1e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/rest/unmarshal.go
@@ -0,0 +1,174 @@
+package rest
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/awslabs/aws-sdk-go/aws"
+)
+
+// Unmarshal decodes the REST portions of r.HTTPResponse into r.Data: first
+// the payload body, then the header/status-code location elements.
+func Unmarshal(r *aws.Request) {
+	if r.DataFilled() {
+		v := reflect.Indirect(reflect.ValueOf(r.Data))
+		unmarshalBody(r, v)
+		unmarshalLocationElements(r, v)
+	}
+}
+
+// unmarshalBody copies the HTTP response body into the struct member named
+// by the SDKShapeTraits `payload` tag, for non-structure payload types
+// ([]byte, string, or a reader type).
+func unmarshalBody(r *aws.Request, v reflect.Value) {
+	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			pfield, _ := v.Type().FieldByName(payloadName)
+			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+				payload := reflect.Indirect(v.FieldByName(payloadName))
+				if payload.IsValid() {
+					switch payload.Interface().(type) {
+					case []byte:
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							r.Error = err
+						} else {
+							payload.Set(reflect.ValueOf(b))
+						}
+					case string:
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							r.Error = err
+						} else {
+							payload.Set(reflect.ValueOf(string(b)))
+						}
+					default:
+						switch payload.Type().String() {
+						case "io.ReadSeeker":
payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = fmt.Errorf("unknown payload type %s", payload.Type()) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *aws.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = err + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = err + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case *map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(&out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := 
base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } else { + v.Set(reflect.ValueOf(&b)) + } + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } else { + v.Set(reflect.ValueOf(&b)) + } + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } else { + v.Set(reflect.ValueOf(&i)) + } + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } else { + v.Set(reflect.ValueOf(&f)) + } + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } else { + v.Set(reflect.ValueOf(&t)) + } + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/build_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/build_test.go new file mode 100644 index 000000000..48d72ca53 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/build_test.go @@ -0,0 +1,2571 @@ +package restxml_test + +import ( + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/restxml" + "github.com/awslabs/aws-sdk-go/internal/signer/v4" + + "bytes" + "encoding/json" + "encoding/xml" + "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/awslabs/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} + +// InputService1ProtocolTest is a client for InputService1ProtocolTest. 
+type InputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService1ProtocolTest client. +func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice1protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService1ProtocolTest{service} +} + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + if opInputService1TestCaseOperation1 == nil { + opInputService1TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + } + + req = aws.NewRequest(c.Service, opInputService1TestCaseOperation1, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (output *InputService1TestShapeInputService1TestCaseOperation1Output, err error) { + req, out := c.InputService1TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService1TestCaseOperation1 *aws.Operation + +// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + if opInputService1TestCaseOperation2 == nil { + opInputService1TestCaseOperation2 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "PUT", + HTTPPath: "/2014-01-01/hostedzone", + } + } + + req = aws.NewRequest(c.Service, opInputService1TestCaseOperation2, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (output *InputService1TestShapeInputService1TestCaseOperation2Output, err error) { + req, out := c.InputService1TestCaseOperation2Request(input) + output = out + err = req.Send() + return +} + +var opInputService1TestCaseOperation2 *aws.Operation + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation2Output `json:"-", xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + Description *string `type:"string"` + + Name *string `type:"string"` + + metadataInputService1TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService1TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +// InputService2ProtocolTest is a client for InputService2ProtocolTest. 
+type InputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService2ProtocolTest client. +func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice2protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService2ProtocolTest{service} +} + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + if opInputService2TestCaseOperation1 == nil { + opInputService2TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + } + + req = aws.NewRequest(c.Service, opInputService2TestCaseOperation1, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (output *InputService2TestShapeInputService2TestCaseOperation1Output, err error) { + req, out := c.InputService2TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService2TestCaseOperation1 *aws.Operation + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-", 
xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeInputShape struct { + First *bool `type:"boolean"` + + Fourth *int64 `type:"integer"` + + Second *bool `type:"boolean"` + + Third *float64 `type:"float"` + + metadataInputService2TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService2TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +// InputService3ProtocolTest is a client for InputService3ProtocolTest. +type InputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService3ProtocolTest client. +func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice3protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService3ProtocolTest{service} +} + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + if opInputService3TestCaseOperation1 == nil { + opInputService3TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + } + + req = aws.NewRequest(c.Service, opInputService3TestCaseOperation1, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (output *InputService3TestShapeInputService3TestCaseOperation1Output, err error) { + req, out := c.InputService3TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService3TestCaseOperation1 *aws.Operation + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + Description *string `type:"string"` + + SubStructure *InputService3TestShapeSubStructure `type:"structure"` + + metadataInputService3TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService3TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +type InputService3TestShapeSubStructure struct { + Bar *string `type:"string"` + + Foo *string `type:"string"` + + metadataInputService3TestShapeSubStructure `json:"-", xml:"-"` +} + +type metadataInputService3TestShapeSubStructure struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService4ProtocolTest is a client for InputService4ProtocolTest. 
+type InputService4ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService4ProtocolTest returns a new InputService4ProtocolTest client.
+func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice4protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService4ProtocolTest{service}
+}
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService4TestCaseOperation1 == nil {
+		opInputService4TestCaseOperation1 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/2014-01-01/hostedzone",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService4TestCaseOperation1, input, output)
+	output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+// InputService4TestCaseOperation1 builds the request and sends it, returning the output.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (output *InputService4TestShapeInputService4TestCaseOperation1Output, err error) {
+	req, out := c.InputService4TestCaseOperation1Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService4TestCaseOperation1 caches the lazily-built operation descriptor.
+var opInputService4TestCaseOperation1 *aws.Operation
+
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+	metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService4TestShapeInputShape is the input for InputService4TestCaseOperation1.
+type InputService4TestShapeInputShape struct {
+	Description *string `type:"string"`
+
+	SubStructure *InputService4TestShapeSubStructure `type:"structure"`
+
+	metadataInputService4TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService4TestShapeSubStructure is a nested structure member of the input shape.
+type InputService4TestShapeSubStructure struct {
+	Bar *string `type:"string"`
+
+	Foo *string `type:"string"`
+
+	metadataInputService4TestShapeSubStructure `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeSubStructure struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService5ProtocolTest is a client for InputService5ProtocolTest.
+type InputService5ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService5ProtocolTest returns a new InputService5ProtocolTest client.
+func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice5protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService5ProtocolTest{service}
+}
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + if opInputService5TestCaseOperation1 == nil { + opInputService5TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + } + + req = aws.NewRequest(c.Service, opInputService5TestCaseOperation1, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (output *InputService5TestShapeInputService5TestCaseOperation1Output, err error) { + req, out := c.InputService5TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService5TestCaseOperation1 *aws.Operation + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputShape struct { + ListParam []*string `type:"list"` + + metadataInputService5TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService5TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +// InputService6ProtocolTest is a client for InputService6ProtocolTest. +type InputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService6ProtocolTest client. 
+func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice6protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService6ProtocolTest{service}
+}
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService6TestCaseOperation1 == nil {
+		opInputService6TestCaseOperation1 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/2014-01-01/hostedzone",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService6TestCaseOperation1, input, output)
+	output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+// InputService6TestCaseOperation1 builds the request and sends it, returning the output.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (output *InputService6TestShapeInputService6TestCaseOperation1Output, err error) {
+	req, out := c.InputService6TestCaseOperation1Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService6TestCaseOperation1 caches the lazily-built operation descriptor.
+var opInputService6TestCaseOperation1 *aws.Operation
+
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+	metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService6TestShapeInputShape is the input for InputService6TestCaseOperation1.
+type InputService6TestShapeInputShape struct {
+	ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"`
+
+	metadataInputService6TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService7ProtocolTest is a client for InputService7ProtocolTest.
+type InputService7ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService7ProtocolTest returns a new InputService7ProtocolTest client.
+func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice7protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService7ProtocolTest{service}
+}
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + if opInputService7TestCaseOperation1 == nil { + opInputService7TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + } + + req = aws.NewRequest(c.Service, opInputService7TestCaseOperation1, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (output *InputService7TestShapeInputService7TestCaseOperation1Output, err error) { + req, out := c.InputService7TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService7TestCaseOperation1 *aws.Operation + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7TestShapeInputShape struct { + ListParam []*string `type:"list" flattened:"true"` + + metadataInputService7TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService7TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +// InputService8ProtocolTest is a client for InputService8ProtocolTest. +type InputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService8ProtocolTest client. 
+func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice8protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService8ProtocolTest{service}
+}
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService8TestCaseOperation1 == nil {
+		opInputService8TestCaseOperation1 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/2014-01-01/hostedzone",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService8TestCaseOperation1, input, output)
+	output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+// InputService8TestCaseOperation1 builds the request and sends it, returning the output.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (output *InputService8TestShapeInputService8TestCaseOperation1Output, err error) {
+	req, out := c.InputService8TestCaseOperation1Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService8TestCaseOperation1 caches the lazily-built operation descriptor.
+var opInputService8TestCaseOperation1 *aws.Operation
+
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+	metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService8TestShapeInputShape is the input for InputService8TestCaseOperation1.
+type InputService8TestShapeInputShape struct {
+	ListParam []*string `locationName:"item" type:"list" flattened:"true"`
+
+	metadataInputService8TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService9ProtocolTest is a client for InputService9ProtocolTest.
+type InputService9ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService9ProtocolTest returns a new InputService9ProtocolTest client.
+func NewInputService9ProtocolTest(config *aws.Config) *InputService9ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice9protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService9ProtocolTest{service}
+}
+
+// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService9TestCaseOperation1 == nil {
+		opInputService9TestCaseOperation1 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/2014-01-01/hostedzone",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService9TestCaseOperation1, input, output)
+	output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+// InputService9TestCaseOperation1 builds the request and sends it, returning the output.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (output *InputService9TestShapeInputService9TestCaseOperation1Output, err error) {
+	req, out := c.InputService9TestCaseOperation1Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService9TestCaseOperation1 caches the lazily-built operation descriptor.
+var opInputService9TestCaseOperation1 *aws.Operation
+
+type InputService9TestShapeInputService9TestCaseOperation1Output struct {
+	metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService9TestShapeInputShape is the input for InputService9TestCaseOperation1.
+type InputService9TestShapeInputShape struct {
+	ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"`
+
+	metadataInputService9TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService9TestShapeSingleFieldStruct is a single-member list element of the input shape.
+type InputService9TestShapeSingleFieldStruct struct {
+	Element *string `locationName:"value" type:"string"`
+
+	metadataInputService9TestShapeSingleFieldStruct `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeSingleFieldStruct struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService10ProtocolTest is a client for InputService10ProtocolTest.
+type InputService10ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService10ProtocolTest returns a new InputService10ProtocolTest client.
+func NewInputService10ProtocolTest(config *aws.Config) *InputService10ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice10protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService10ProtocolTest{service}
+}
+
+// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation.
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputShape) (req *aws.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService10TestCaseOperation1 == nil {
+		opInputService10TestCaseOperation1 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/2014-01-01/hostedzone",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService10TestCaseOperation1, input, output)
+	output = &InputService10TestShapeInputService10TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+// InputService10TestCaseOperation1 builds the request and sends it, returning the output.
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputShape) (output *InputService10TestShapeInputService10TestCaseOperation1Output, err error) {
+	req, out := c.InputService10TestCaseOperation1Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService10TestCaseOperation1 caches the lazily-built operation descriptor.
+var opInputService10TestCaseOperation1 *aws.Operation
+
+type InputService10TestShapeInputService10TestCaseOperation1Output struct {
+	metadataInputService10TestShapeInputService10TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputService10TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService10TestShapeInputShape is the input for InputService10TestCaseOperation1.
+type InputService10TestShapeInputShape struct {
+	StructureParam *InputService10TestShapeStructureShape `type:"structure"`
+
+	metadataInputService10TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputShape struct {
+	SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+// InputService10TestShapeStructureShape carries a blob and an ISO-8601 timestamp member.
+type InputService10TestShapeStructureShape struct {
+	B []byte `locationName:"b" type:"blob"`
+
+	T *time.Time `locationName:"t" type:"timestamp" timestampFormat:"iso8601"`
+
+	metadataInputService10TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeStructureShape struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService11ProtocolTest is a client for InputService11ProtocolTest.
+type InputService11ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService11ProtocolTest returns a new InputService11ProtocolTest client.
+func NewInputService11ProtocolTest(config *aws.Config) *InputService11ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice11protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService11ProtocolTest{service}
+}
+
+// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation.
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputShape) (req *aws.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + if opInputService11TestCaseOperation1 == nil { + opInputService11TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/", + } + } + + req = aws.NewRequest(c.Service, opInputService11TestCaseOperation1, input, output) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputShape) (output *InputService11TestShapeInputService11TestCaseOperation1Output, err error) { + req, out := c.InputService11TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService11TestCaseOperation1 *aws.Operation + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + metadataInputService11TestShapeInputService11TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService11TestShapeInputService11TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService11TestShapeInputShape struct { + Foo *map[string]*string `location:"headers" locationName:"x-foo-" type:"map"` + + metadataInputService11TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService11TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +// InputService12ProtocolTest is a client for InputService12ProtocolTest. +type InputService12ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService12ProtocolTest client. 
+func NewInputService12ProtocolTest(config *aws.Config) *InputService12ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice12protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService12ProtocolTest{service}
+}
+
+// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *aws.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService12TestCaseOperation1 == nil {
+		opInputService12TestCaseOperation1 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService12TestCaseOperation1, input, output)
+	output = &InputService12TestShapeInputService12TestCaseOperation1Output{}
+	req.Data = output
+	return
+}
+
+// InputService12TestCaseOperation1 builds the request and sends it, returning the output.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (output *InputService12TestShapeInputService12TestCaseOperation1Output, err error) {
+	req, out := c.InputService12TestCaseOperation1Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService12TestCaseOperation1 caches the lazily-built operation descriptor.
+var opInputService12TestCaseOperation1 *aws.Operation
+
+type InputService12TestShapeInputService12TestCaseOperation1Output struct {
+	metadataInputService12TestShapeInputService12TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService12TestShapeInputShape is the input for InputService12TestCaseOperation1;
+// Foo is the request payload (see the metadata payload trait).
+type InputService12TestShapeInputShape struct {
+	Foo *string `locationName:"foo" type:"string"`
+
+	metadataInputService12TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputShape struct {
+	SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// InputService13ProtocolTest is a client for InputService13ProtocolTest.
+type InputService13ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService13ProtocolTest returns a new InputService13ProtocolTest client.
+func NewInputService13ProtocolTest(config *aws.Config) *InputService13ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice13protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService13ProtocolTest{service}
+}
+
+// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation.
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *aws.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + if opInputService13TestCaseOperation1 == nil { + opInputService13TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/", + } + } + + req = aws.NewRequest(c.Service, opInputService13TestCaseOperation1, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (output *InputService13TestShapeInputService13TestCaseOperation1Output, err error) { + req, out := c.InputService13TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService13TestCaseOperation1 *aws.Operation + +// InputService13TestCaseOperation2Request generates a request for the InputService13TestCaseOperation2 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *aws.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { + if opInputService13TestCaseOperation2 == nil { + opInputService13TestCaseOperation2 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/", + } + } + + req = aws.NewRequest(c.Service, opInputService13TestCaseOperation2, input, output) + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (output *InputService13TestShapeInputService13TestCaseOperation2Output, err error) { + req, out := c.InputService13TestCaseOperation2Request(input) + output = out + err = req.Send() + return +} + +var opInputService13TestCaseOperation2 *aws.Operation + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + metadataInputService13TestShapeInputService13TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService13TestShapeInputService13TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation2Output struct { + metadataInputService13TestShapeInputService13TestCaseOperation2Output `json:"-", xml:"-"` +} + +type metadataInputService13TestShapeInputService13TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService13TestShapeInputShape struct { + Foo []byte `locationName:"foo" type:"blob"` + + metadataInputService13TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService13TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure" payload:"Foo"` +} + +// InputService14ProtocolTest is a client for InputService14ProtocolTest. 
+type InputService14ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService14ProtocolTest client. +func NewInputService14ProtocolTest(config *aws.Config) *InputService14ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice14protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService14ProtocolTest{service} +} + +// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation. +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + if opInputService14TestCaseOperation1 == nil { + opInputService14TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/", + } + } + + req = aws.NewRequest(c.Service, opInputService14TestCaseOperation1, input, output) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputShape) (output *InputService14TestShapeInputService14TestCaseOperation1Output, err error) { + req, out := c.InputService14TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService14TestCaseOperation1 *aws.Operation + +// InputService14TestCaseOperation2Request generates a request for the InputService14TestCaseOperation2 operation. 
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) {
+	// Lazily initialize the operation descriptor (generated-code pattern; not goroutine-safe).
+	if opInputService14TestCaseOperation2 == nil {
+		opInputService14TestCaseOperation2 = &aws.Operation{
+			Name:       "OperationName",
+			HTTPMethod: "POST",
+			HTTPPath:   "/",
+		}
+	}
+
+	// NOTE(review): output is still nil when passed to NewRequest; req.Data is overwritten below.
+	req = aws.NewRequest(c.Service, opInputService14TestCaseOperation2, input, output)
+	output = &InputService14TestShapeInputService14TestCaseOperation2Output{}
+	req.Data = output
+	return
+}
+
+// InputService14TestCaseOperation2 builds the request and sends it, returning the output.
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputShape) (output *InputService14TestShapeInputService14TestCaseOperation2Output, err error) {
+	req, out := c.InputService14TestCaseOperation2Request(input)
+	output = out
+	err = req.Send()
+	return
+}
+
+// opInputService14TestCaseOperation2 caches the lazily-built operation descriptor.
+var opInputService14TestCaseOperation2 *aws.Operation
+
+// InputService14TestShapeFooShape is the payload structure member of the input shape.
+type InputService14TestShapeFooShape struct {
+	Baz *string `locationName:"baz" type:"string"`
+
+	metadataInputService14TestShapeFooShape `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeFooShape struct {
+	SDKShapeTraits bool `locationName:"foo" type:"structure"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation1Output struct {
+	metadataInputService14TestShapeInputService14TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation1Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation2Output struct {
+	metadataInputService14TestShapeInputService14TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation2Output struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// InputService14TestShapeInputShape is the shared input for both InputService14 operations;
+// Foo is the request payload (see the metadata payload trait).
+type InputService14TestShapeInputShape struct {
+	Foo *InputService14TestShapeFooShape `locationName:"foo" type:"structure"`
+
+	metadataInputService14TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputShape struct {
+	SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// InputService15ProtocolTest is a client for InputService15ProtocolTest.
+type InputService15ProtocolTest struct {
+	*aws.Service
+}
+
+// NewInputService15ProtocolTest returns a new InputService15ProtocolTest client.
+func NewInputService15ProtocolTest(config *aws.Config) *InputService15ProtocolTest {
+	if config == nil {
+		config = &aws.Config{}
+	}
+
+	service := &aws.Service{
+		Config:      aws.DefaultConfig.Merge(config),
+		ServiceName: "inputservice15protocoltest",
+		APIVersion:  "2014-01-01",
+	}
+	service.Initialize()
+
+	// Handlers: sign with SigV4, marshal/unmarshal with the REST-XML protocol.
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(restxml.Build)
+	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+	return &InputService15ProtocolTest{service}
+}
+
+// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation.
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *aws.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + if opInputService15TestCaseOperation1 == nil { + opInputService15TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/", + } + } + + req = aws.NewRequest(c.Service, opInputService15TestCaseOperation1, input, output) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (output *InputService15TestShapeInputService15TestCaseOperation1Output, err error) { + req, out := c.InputService15TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService15TestCaseOperation1 *aws.Operation + +type InputService15TestShapeGrant struct { + Grantee *InputService15TestShapeGrantee `type:"structure"` + + metadataInputService15TestShapeGrant `json:"-", xml:"-"` +} + +type metadataInputService15TestShapeGrant struct { + SDKShapeTraits bool `locationName:"Grant" type:"structure"` +} + +type InputService15TestShapeGrantee struct { + EmailAddress *string `type:"string"` + + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"` + + metadataInputService15TestShapeGrantee `json:"-", xml:"-"` +} + +type metadataInputService15TestShapeGrantee struct { + SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` +} + +type InputService15TestShapeInputService15TestCaseOperation1Output struct { + metadataInputService15TestShapeInputService15TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService15TestShapeInputService15TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService15TestShapeInputShape struct { + Grant 
*InputService15TestShapeGrant `locationName:"Grant" type:"structure"` + + metadataInputService15TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService15TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure" payload:"Grant"` +} + +// InputService16ProtocolTest is a client for InputService16ProtocolTest. +type InputService16ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService16ProtocolTest client. +func NewInputService16ProtocolTest(config *aws.Config) *InputService16ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice16protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService16ProtocolTest{service} +} + +// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *aws.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { + if opInputService16TestCaseOperation1 == nil { + opInputService16TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + req = aws.NewRequest(c.Service, opInputService16TestCaseOperation1, input, output) + output = &InputService16TestShapeInputService16TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (output *InputService16TestShapeInputService16TestCaseOperation1Output, err error) { + req, out := c.InputService16TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService16TestCaseOperation1 *aws.Operation + +type InputService16TestShapeInputService16TestCaseOperation1Output struct { + metadataInputService16TestShapeInputService16TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService16TestShapeInputService16TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService16TestShapeInputShape struct { + Bucket *string `location:"uri" type:"string"` + + Key *string `location:"uri" type:"string"` + + metadataInputService16TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService16TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService17ProtocolTest is a client for InputService17ProtocolTest. +type InputService17ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService17ProtocolTest client. 
+func NewInputService17ProtocolTest(config *aws.Config) *InputService17ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice17protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService17ProtocolTest{service} +} + +// InputService17TestCaseOperation1Request generates a request for the InputService17TestCaseOperation1 operation. +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputShape) (req *aws.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) { + if opInputService17TestCaseOperation1 == nil { + opInputService17TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService17TestCaseOperation1, input, output) + output = &InputService17TestShapeInputService17TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputShape) (output *InputService17TestShapeInputService17TestCaseOperation1Output, err error) { + req, out := c.InputService17TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService17TestCaseOperation1 *aws.Operation + +// InputService17TestCaseOperation2Request generates a request for the InputService17TestCaseOperation2 operation. 
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation2Request(input *InputService17TestShapeInputShape) (req *aws.Request, output *InputService17TestShapeInputService17TestCaseOperation2Output) { + if opInputService17TestCaseOperation2 == nil { + opInputService17TestCaseOperation2 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path?abc=mno", + } + } + + req = aws.NewRequest(c.Service, opInputService17TestCaseOperation2, input, output) + output = &InputService17TestShapeInputService17TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation2(input *InputService17TestShapeInputShape) (output *InputService17TestShapeInputService17TestCaseOperation2Output, err error) { + req, out := c.InputService17TestCaseOperation2Request(input) + output = out + err = req.Send() + return +} + +var opInputService17TestCaseOperation2 *aws.Operation + +type InputService17TestShapeInputService17TestCaseOperation1Output struct { + metadataInputService17TestShapeInputService17TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService17TestShapeInputService17TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService17TestShapeInputService17TestCaseOperation2Output struct { + metadataInputService17TestShapeInputService17TestCaseOperation2Output `json:"-", xml:"-"` +} + +type metadataInputService17TestShapeInputService17TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService17TestShapeInputShape struct { + Foo *string `location:"querystring" locationName:"param-name" type:"string"` + + metadataInputService17TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService17TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService18ProtocolTest is a client for InputService18ProtocolTest. 
+type InputService18ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService18ProtocolTest client. +func NewInputService18ProtocolTest(config *aws.Config) *InputService18ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice18protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &InputService18ProtocolTest{service} +} + +// InputService18TestCaseOperation1Request generates a request for the InputService18TestCaseOperation1 operation. +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation1Output) { + if opInputService18TestCaseOperation1 == nil { + opInputService18TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService18TestCaseOperation1, input, output) + output = &InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputShape) (output *InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation1Output, err error) { + req, out := c.InputService18TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opInputService18TestCaseOperation1 *aws.Operation + +// InputService18TestCaseOperation2Request generates a request for the 
InputService18TestCaseOperation2 operation. +func (c *InputService18ProtocolTest) InputService18TestCaseOperation2Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation2Output) { + if opInputService18TestCaseOperation2 == nil { + opInputService18TestCaseOperation2 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService18TestCaseOperation2, input, output) + output = &InputService18TestShapeInputService18TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation2(input *InputService18TestShapeInputShape) (output *InputService18TestShapeInputService18TestCaseOperation2Output, err error) { + req, out := c.InputService18TestCaseOperation2Request(input) + output = out + err = req.Send() + return +} + +var opInputService18TestCaseOperation2 *aws.Operation + +// InputService18TestCaseOperation3Request generates a request for the InputService18TestCaseOperation3 operation. 
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation3Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation3Output) { + if opInputService18TestCaseOperation3 == nil { + opInputService18TestCaseOperation3 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService18TestCaseOperation3, input, output) + output = &InputService18TestShapeInputService18TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation3(input *InputService18TestShapeInputShape) (output *InputService18TestShapeInputService18TestCaseOperation3Output, err error) { + req, out := c.InputService18TestCaseOperation3Request(input) + output = out + err = req.Send() + return +} + +var opInputService18TestCaseOperation3 *aws.Operation + +// InputService18TestCaseOperation4Request generates a request for the InputService18TestCaseOperation4 operation. 
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation4Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation4Output) { + if opInputService18TestCaseOperation4 == nil { + opInputService18TestCaseOperation4 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService18TestCaseOperation4, input, output) + output = &InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation4(input *InputService18TestShapeInputShape) (output *InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation4Output, err error) { + req, out := c.InputService18TestCaseOperation4Request(input) + output = out + err = req.Send() + return +} + +var opInputService18TestCaseOperation4 *aws.Operation + +// InputService18TestCaseOperation5Request generates a request for the InputService18TestCaseOperation5 operation. 
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation5Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation5Output) { + if opInputService18TestCaseOperation5 == nil { + opInputService18TestCaseOperation5 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService18TestCaseOperation5, input, output) + output = &InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation5(input *InputService18TestShapeInputShape) (output *InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation5Output, err error) { + req, out := c.InputService18TestCaseOperation5Request(input) + output = out + err = req.Send() + return +} + +var opInputService18TestCaseOperation5 *aws.Operation + +// InputService18TestCaseOperation6Request generates a request for the InputService18TestCaseOperation6 operation. 
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation6Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation6Output) { + if opInputService18TestCaseOperation6 == nil { + opInputService18TestCaseOperation6 = &aws.Operation{ + Name: "OperationName", + HTTPMethod: "POST", + HTTPPath: "/path", + } + } + + req = aws.NewRequest(c.Service, opInputService18TestCaseOperation6, input, output) + output = &InputService18TestShapeInputService18TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation6(input *InputService18TestShapeInputShape) (output *InputService18TestShapeInputService18TestCaseOperation6Output, err error) { + req, out := c.InputService18TestCaseOperation6Request(input) + output = out + err = req.Send() + return +} + +var opInputService18TestCaseOperation6 *aws.Operation + +type InputService18TestShapeInputService18TestCaseOperation2Output struct { + metadataInputService18TestShapeInputService18TestCaseOperation2Output `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputService18TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService18TestShapeInputService18TestCaseOperation3Output struct { + metadataInputService18TestShapeInputService18TestCaseOperation3Output `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputService18TestCaseOperation3Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService18TestShapeInputService18TestCaseOperation6Output struct { + metadataInputService18TestShapeInputService18TestCaseOperation6Output `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputService18TestCaseOperation6Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation1Output struct { + 
metadataInputService18TestShapeInputService18TestShapeInputService18TestCaseOperation1Output `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputService18TestShapeInputService18TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation4Output struct { + metadataInputService18TestShapeInputService18TestShapeInputService18TestCaseOperation4Output `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputService18TestShapeInputService18TestCaseOperation4Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService18TestShapeInputService18TestShapeInputService18TestCaseOperation5Output struct { + metadataInputService18TestShapeInputService18TestShapeInputService18TestCaseOperation5Output `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputService18TestShapeInputService18TestCaseOperation5Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService18TestShapeInputShape struct { + RecursiveStruct *InputService18TestShapeRecursiveStructType `type:"structure"` + + metadataInputService18TestShapeInputShape `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeInputShape struct { + SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` +} + +type InputService18TestShapeRecursiveStructType struct { + NoRecurse *string `type:"string"` + + RecursiveList []*InputService18TestShapeRecursiveStructType `type:"list"` + + RecursiveMap *map[string]*InputService18TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService18TestShapeRecursiveStructType `type:"structure"` + + metadataInputService18TestShapeRecursiveStructType `json:"-", xml:"-"` +} + +type metadataInputService18TestShapeRecursiveStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +// InputService19ProtocolTest is a client for InputService19ProtocolTest. 
type InputService19ProtocolTest struct {
	*aws.Service
}

// New returns a new InputService19ProtocolTest client.
func NewInputService19ProtocolTest(config *aws.Config) *InputService19ProtocolTest {
	if config == nil {
		config = &aws.Config{}
	}

	service := &aws.Service{
		Config:      aws.DefaultConfig.Merge(config),
		ServiceName: "inputservice19protocoltest",
		APIVersion:  "2014-01-01",
	}
	service.Initialize()

	// Handlers
	service.Handlers.Sign.PushBack(v4.Sign)
	service.Handlers.Build.PushBack(restxml.Build)
	service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
	service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
	service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)

	return &InputService19ProtocolTest{service}
}

// InputService19TestCaseOperation1Request generates a request for the InputService19TestCaseOperation1 operation.
func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *aws.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) {
	// Lazily build the operation descriptor on first use.
	if opInputService19TestCaseOperation1 == nil {
		opInputService19TestCaseOperation1 = &aws.Operation{
			Name:       "OperationName",
			HTTPMethod: "POST",
			HTTPPath:   "/path",
		}
	}

	// output is nil here; the real value is allocated below and attached
	// to the request via req.Data.
	req = aws.NewRequest(c.Service, opInputService19TestCaseOperation1, input, output)
	output = &InputService19TestShapeInputService19TestCaseOperation1Output{}
	req.Data = output
	return
}

// InputService19TestCaseOperation1 builds and sends the request.
func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (output *InputService19TestShapeInputService19TestCaseOperation1Output, err error) {
	req, out := c.InputService19TestCaseOperation1Request(input)
	output = out
	err = req.Send()
	return
}

var opInputService19TestCaseOperation1 *aws.Operation

// NOTE(review): the tag below is `json:"-", xml:"-"` — the comma is not
// valid struct-tag syntax; verify against the generator (should be
// `json:"-" xml:"-"`). Left unchanged here.
type InputService19TestShapeInputService19TestCaseOperation1Output struct {
	metadataInputService19TestShapeInputService19TestCaseOperation1Output `json:"-", xml:"-"`
}

type metadataInputService19TestShapeInputService19TestCaseOperation1Output struct {
	SDKShapeTraits bool `type:"structure"`
}

type InputService19TestShapeInputShape struct {
	// Serialized into the x-amz-timearg header in RFC 822 timestamp form.
	TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"`

	metadataInputService19TestShapeInputShape `json:"-", xml:"-"`
}

type metadataInputService19TestShapeInputShape struct {
	SDKShapeTraits bool `type:"structure"`
}

//
// Tests begin here
//
// NOTE(review): the expected-body literals in the assertions below appear
// to have lost their XML markup in this rendering (e.g. `barfoo` instead of
// tagged XML) — verify each literal against the protocol-test generator
// output before relying on them.

// Basic XML serialization of string members.
func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) {
	svc := NewInputService1ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService1TestShapeInputShape{
		Description: aws.String("bar"),
		Name:        aws.String("foo"),
	}
	req, _ := svc.InputService1TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`barfoo`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Same payload serialized through the second operation.
func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) {
	svc := NewInputService1ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService1TestShapeInputShape{
		Description: aws.String("bar"),
		Name:        aws.String("foo"),
	}
	req, _ := svc.InputService1TestCaseOperation2Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`barfoo`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Serialization of bool, long, and double members.
func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) {
	svc := NewInputService2ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService2TestShapeInputShape{
		First:  aws.Boolean(true),
		Fourth: aws.Long(3),
		Second: aws.Boolean(false),
		Third:  aws.Double(1.2),
	}
	req, _ := svc.InputService2TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`true3false1.2`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Nested structure members.
func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) {
	svc := NewInputService3ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService3TestShapeInputShape{
		Description: aws.String("baz"),
		SubStructure: &InputService3TestShapeSubStructure{
			Bar: aws.String("b"),
			Foo: aws.String("a"),
		},
	}
	req, _ := svc.InputService3TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`bazba`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Empty nested structure still serializes.
func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) {
	svc := NewInputService4ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService4TestShapeInputShape{
		Description:  aws.String("baz"),
		SubStructure: &InputService4TestShapeSubStructure{},
	}
	req, _ := svc.InputService4TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`baz`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Non-flattened (wrapped) list serialization.
func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) {
	svc := NewInputService5ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService5TestShapeInputShape{
		ListParam: []*string{
			aws.String("one"),
			aws.String("two"),
			aws.String("three"),
		},
	}
	req, _ := svc.InputService5TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Wrapped list with a custom locationName.
func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *testing.T) {
	svc := NewInputService6ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService6TestShapeInputShape{
		ListParam: []*string{
			aws.String("one"),
			aws.String("two"),
			aws.String("three"),
		},
	}
	req, _ := svc.InputService6TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Flattened list serialization.
func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) {
	svc := NewInputService7ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService7TestShapeInputShape{
		ListParam: []*string{
			aws.String("one"),
			aws.String("two"),
			aws.String("three"),
		},
	}
	req, _ := svc.InputService7TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Flattened list with a custom locationName.
func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) {
	svc := NewInputService8ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService8TestShapeInputShape{
		ListParam: []*string{
			aws.String("one"),
			aws.String("two"),
			aws.String("three"),
		},
	}
	req, _ := svc.InputService8TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// List whose members are structures.
func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) {
	svc := NewInputService9ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService9TestShapeInputShape{
		ListParam: []*InputService9TestShapeSingleFieldStruct{
			&InputService9TestShapeSingleFieldStruct{
				Element: aws.String("one"),
			},
			&InputService9TestShapeSingleFieldStruct{
				Element: aws.String("two"),
			},
			&InputService9TestShapeSingleFieldStruct{
				Element: aws.String("three"),
			},
		},
	}
	req, _ := svc.InputService9TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Blob (base64) and timestamp (ISO8601) body serialization.
func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) {
	svc := NewInputService10ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService10TestShapeInputShape{
		StructureParam: &InputService10TestShapeStructureShape{
			B: []byte("foo"),
			T: aws.Time(time.Unix(1422172800, 0)),
		},
	}
	req, _ := svc.InputService10TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`Zm9v2015-01-25T08:00:00Z`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String())

	// assert headers

}

// Map members serialized as prefixed headers (x-foo-*).
func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) {
	svc := NewInputService11ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService11TestShapeInputShape{
		Foo: &map[string]*string{
			"a": aws.String("b"),
			"c": aws.String("d"),
		},
	}
	req, _ := svc.InputService11TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers
	assert.Equal(t, "b", r.Header.Get("x-foo-a"))
	assert.Equal(t, "d", r.Header.Get("x-foo-c"))

}

// Raw string payload member.
func TestInputService12ProtocolTestStringPayloadCase1(t *testing.T) {
	svc := NewInputService12ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService12TestShapeInputShape{
		Foo: aws.String("bar"),
	}
	req, _ := svc.InputService12TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`bar`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers

}

// Raw blob payload member.
func TestInputService13ProtocolTestBlobPayloadCase1(t *testing.T) {
	svc := NewInputService13ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService13TestShapeInputShape{
		Foo: []byte("bar"),
	}
	req, _ := svc.InputService13TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`bar`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers

}

// Unset blob payload produces no body assertion.
func TestInputService13ProtocolTestBlobPayloadCase2(t *testing.T) {
	svc := NewInputService13ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService13TestShapeInputShape{}
	req, _ := svc.InputService13TestCaseOperation2Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers

}

// Structure payload member.
func TestInputService14ProtocolTestStructurePayloadCase1(t *testing.T) {
	svc := NewInputService14ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService14TestShapeInputShape{
		Foo: &InputService14TestShapeFooShape{
			Baz: aws.String("bar"),
		},
	}
	req, _ := svc.InputService14TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`bar`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers

}

// Unset structure payload produces no body assertion.
func TestInputService14ProtocolTestStructurePayloadCase2(t *testing.T) {
	svc := NewInputService14ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService14TestShapeInputShape{}
	req, _ := svc.InputService14TestCaseOperation2Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers

}

// XML attribute serialization (xsi:type on Grantee).
func TestInputService15ProtocolTestXMLAttributeCase1(t *testing.T) {
	svc := NewInputService15ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService15TestShapeInputShape{
		Grant: &InputService15TestShapeGrant{
			Grantee: &InputService15TestShapeGrantee{
				EmailAddress: aws.String("foo@example.com"),
				Type:         aws.String("CanonicalUser"),
			},
		},
	}
	req, _ := svc.InputService15TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert body
	assert.NotNil(t, r.Body)
	body := util.SortXML(r.Body)
	assert.Equal(t, util.Trim(`foo@example.com`), util.Trim(string(body)))

	// assert URL
	assert.Equal(t, "https://test/", r.URL.String())

	// assert headers

}

// Greedy URI placeholders: Bucket is escaped, Key may span "/" segments.
func TestInputService16ProtocolTestGreedyKeysCase1(t *testing.T) {
	svc := NewInputService16ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService16TestShapeInputShape{
		Bucket: aws.String("my/bucket"),
		Key:    aws.String("testing /123"),
	}
	req, _ := svc.InputService16TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert URL
	assert.Equal(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String())

	// assert headers

}

// Nil query params are omitted (continues past this chunk).
func TestInputService17ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) {
	svc := NewInputService17ProtocolTest(nil)
	svc.Endpoint = "https://test"

	input := &InputService17TestShapeInputShape{}
	req, _ := svc.InputService17TestCaseOperation1Request(input)
	r := req.HTTPRequest

	// build request
	restxml.Build(req)
	assert.NoError(t, req.Error)

	// assert URL
assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { + svc := NewInputService17ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService17TestShapeInputShape{ + Foo: aws.String(""), + } + req, _ := svc.InputService17TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + assert.Equal(t, "https://test/path?abc=mno¶m-name=", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestRecursiveShapesCase1(t *testing.T) { + svc := NewInputService18ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService18TestShapeInputShape{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService18TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, util.Trim(`foo`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestRecursiveShapesCase2(t *testing.T) { + svc := NewInputService18ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService18TestShapeInputShape{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService18TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, util.Trim(`foo`), util.Trim(string(body))) + + // assert 
URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestRecursiveShapesCase3(t *testing.T) { + svc := NewInputService18ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService18TestShapeInputShape{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService18TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, util.Trim(`foo`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestRecursiveShapesCase4(t *testing.T) { + svc := NewInputService18ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService18TestShapeInputShape{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveList: []*InputService18TestShapeRecursiveStructType{ + &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService18TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, util.Trim(`foobar`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestRecursiveShapesCase5(t *testing.T) { + svc := 
NewInputService18ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService18TestShapeInputShape{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveList: []*InputService18TestShapeRecursiveStructType{ + &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + &InputService18TestShapeRecursiveStructType{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService18TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, util.Trim(`foobar`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestRecursiveShapesCase6(t *testing.T) { + svc := NewInputService18ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService18TestShapeInputShape{ + RecursiveStruct: &InputService18TestShapeRecursiveStructType{ + RecursiveMap: &map[string]*InputService18TestShapeRecursiveStructType{ + "bar": &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + "foo": &InputService18TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService18TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, util.Trim(`barbarfoofoo`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestTimestampInHeaderCase1(t *testing.T) { + svc := NewInputService19ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := 
&InputService19TestShapeInputShape{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService19TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + assert.Equal(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} + diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/restxml.go new file mode 100644 index 000000000..9a952ee19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/restxml.go @@ -0,0 +1,48 @@ +package restxml + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/rest-xml.json build_test.go +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/query" + "github.com/awslabs/aws-sdk-go/internal/protocol/rest" + "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil" +) + +func Build(r *aws.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = err + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +func Unmarshal(r *aws.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = err + return + } + } +} + +func UnmarshalMeta(r *aws.Request) { + rest.Unmarshal(r) +} + +func UnmarshalError(r *aws.Request) 
{ + query.UnmarshalError(r) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go new file mode 100644 index 000000000..33e261628 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go @@ -0,0 +1,1171 @@ +package restxml_test + +import ( + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/restxml" + "github.com/awslabs/aws-sdk-go/internal/signer/v4" + + "bytes" + "encoding/json" + "encoding/xml" + "github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/awslabs/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} + +// OutputService1ProtocolTest is a client for OutputService1ProtocolTest. +type OutputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService1ProtocolTest client. 
+func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice1protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService1ProtocolTest{service} +} + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { + if opOutputService1TestCaseOperation1 == nil { + opOutputService1TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService1TestCaseOperation1, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (output *OutputService1TestShapeOutputShape, err error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService1TestCaseOperation1 *aws.Operation + +// OutputService1TestCaseOperation2Request generates a request for the OutputService1TestCaseOperation2 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { + if opOutputService1TestCaseOperation2 == nil { + opOutputService1TestCaseOperation2 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService1TestCaseOperation2, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (output *OutputService1TestShapeOutputShape, err error) { + req, out := c.OutputService1TestCaseOperation2Request(input) + output = out + err = req.Send() + return +} + +var opOutputService1TestCaseOperation2 *aws.Operation + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation2Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation2Input `json:"-", xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation2Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputShape struct { + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" 
timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` + + metadataOutputService1TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService1TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService2ProtocolTest is a client for OutputService2ProtocolTest. +type OutputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService2ProtocolTest client. +func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice2protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService2ProtocolTest{service} +} + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) { + if opOutputService2TestCaseOperation1 == nil { + opOutputService2TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService2TestCaseOperation1, input, output) + output = &OutputService2TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (output *OutputService2TestShapeOutputShape, err error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService2TestCaseOperation1 *aws.Operation + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2TestShapeOutputShape struct { + Blob []byte `type:"blob"` + + metadataOutputService2TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService2TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService3ProtocolTest is a client for OutputService3ProtocolTest. +type OutputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService3ProtocolTest client. 
+func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice3protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService3ProtocolTest{service} +} + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) { + if opOutputService3TestCaseOperation1 == nil { + opOutputService3TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService3TestCaseOperation1, input, output) + output = &OutputService3TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (output *OutputService3TestShapeOutputShape, err error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService3TestCaseOperation1 *aws.Operation + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3TestShapeOutputShape struct { 
+ ListMember []*string `type:"list"` + + metadataOutputService3TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService3TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService4ProtocolTest is a client for OutputService4ProtocolTest. +type OutputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService4ProtocolTest client. +func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice4protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService4ProtocolTest{service} +} + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) { + if opOutputService4TestCaseOperation1 == nil { + opOutputService4TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService4TestCaseOperation1, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (output *OutputService4TestShapeOutputShape, err error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService4TestCaseOperation1 *aws.Operation + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + ListMember []*string `locationNameList:"item" type:"list"` + + metadataOutputService4TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService4TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService5ProtocolTest is a client for OutputService5ProtocolTest. +type OutputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService5ProtocolTest client. 
+func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice5protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService5ProtocolTest{service} +} + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) { + if opOutputService5TestCaseOperation1 == nil { + opOutputService5TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService5TestCaseOperation1, input, output) + output = &OutputService5TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (output *OutputService5TestShapeOutputShape, err error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService5TestCaseOperation1 *aws.Operation + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5TestShapeOutputShape struct { 
+ ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService5TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService5TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService6ProtocolTest is a client for OutputService6ProtocolTest. +type OutputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService6ProtocolTest client. +func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice6protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService6ProtocolTest{service} +} + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) { + if opOutputService6TestCaseOperation1 == nil { + opOutputService6TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService6TestCaseOperation1, input, output) + output = &OutputService6TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (output *OutputService6TestShapeOutputShape, err error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService6TestCaseOperation1 *aws.Operation + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeOutputShape struct { + Map *map[string]*OutputService6TestShapeSingleStructure `type:"map"` + + metadataOutputService6TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService6TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeSingleStructure struct { + Foo *string `locationName:"foo" type:"string"` + + metadataOutputService6TestShapeSingleStructure `json:"-", xml:"-"` +} + +type metadataOutputService6TestShapeSingleStructure struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService7ProtocolTest is a client for OutputService7ProtocolTest. +type OutputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService7ProtocolTest client. 
+func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice7protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService7ProtocolTest{service} +} + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) { + if opOutputService7TestCaseOperation1 == nil { + opOutputService7TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService7TestCaseOperation1, input, output) + output = &OutputService7TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (output *OutputService7TestShapeOutputShape, err error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService7TestCaseOperation1 *aws.Operation + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7TestShapeOutputShape struct { 
+ Map *map[string]*string `type:"map" flattened:"true"` + + metadataOutputService7TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService7TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService8ProtocolTest is a client for OutputService8ProtocolTest. +type OutputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService8ProtocolTest client. +func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice8protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService8ProtocolTest{service} +} + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) { + if opOutputService8TestCaseOperation1 == nil { + opOutputService8TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService8TestCaseOperation1, input, output) + output = &OutputService8TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (output *OutputService8TestShapeOutputShape, err error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService8TestCaseOperation1 *aws.Operation + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeOutputShape struct { + Map *map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"` + + metadataOutputService8TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService8TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService9ProtocolTest is a client for OutputService9ProtocolTest. +type OutputService9ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService9ProtocolTest client. 
+func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice9protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService9ProtocolTest{service} +} + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) { + if opOutputService9TestCaseOperation1 == nil { + opOutputService9TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService9TestCaseOperation1, input, output) + output = &OutputService9TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (output *OutputService9TestShapeOutputShape, err error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService9TestCaseOperation1 *aws.Operation + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeOutputShape struct { 
+ Data *OutputService9TestShapeSingleStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` + + metadataOutputService9TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService9TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure" payload:"Data"` +} + +type OutputService9TestShapeSingleStructure struct { + Foo *string `type:"string"` + + metadataOutputService9TestShapeSingleStructure `json:"-", xml:"-"` +} + +type metadataOutputService9TestShapeSingleStructure struct { + SDKShapeTraits bool `type:"structure"` +} + +// OutputService10ProtocolTest is a client for OutputService10ProtocolTest. +type OutputService10ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService10ProtocolTest client. +func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice10protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService10ProtocolTest{service} +} + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) { + if opOutputService10TestCaseOperation1 == nil { + opOutputService10TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService10TestCaseOperation1, input, output) + output = &OutputService10TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (output *OutputService10TestShapeOutputShape, err error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService10TestCaseOperation1 *aws.Operation + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService10TestShapeOutputShape struct { + Stream []byte `type:"blob"` + + metadataOutputService10TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService10TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure" payload:"Stream"` +} + +// OutputService11ProtocolTest is a client for OutputService11ProtocolTest. +type OutputService11ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService11ProtocolTest client. 
+func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice11protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + return &OutputService11ProtocolTest{service} +} + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) { + if opOutputService11TestCaseOperation1 == nil { + opOutputService11TestCaseOperation1 = &aws.Operation{ + Name: "OperationName", + } + } + + req = aws.NewRequest(c.Service, opOutputService11TestCaseOperation1, input, output) + output = &OutputService11TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (output *OutputService11TestShapeOutputShape, err error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + output = out + err = req.Send() + return +} + +var opOutputService11TestCaseOperation1 *aws.Operation + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-", xml:"-"` +} + +type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type 
OutputService11TestShapeOutputShape struct { + Char *string `location:"header" locationName:"x-char" type:"character"` + + Double *float64 `location:"header" locationName:"x-double" type:"double"` + + FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"` + + Float *float64 `location:"header" locationName:"x-float" type:"float"` + + Integer *int64 `location:"header" locationName:"x-int" type:"integer"` + + Long *int64 `location:"header" locationName:"x-long" type:"long"` + + Str *string `location:"header" locationName:"x-str" type:"string"` + + Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"` + + metadataOutputService11TestShapeOutputShape `json:"-", xml:"-"` +} + +type metadataOutputService11TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + svc := NewOutputService1ProtocolTest(nil) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, 200, *out.Long) + assert.Equal(t, 123, *out.Num) + assert.Equal(t, "myname", *out.Str) + 
assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) { + svc := NewOutputService1ProtocolTest(nil) + + buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, 200, *out.Long) + assert.Equal(t, 123, *out.Num) + assert.Equal(t, "", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + svc := NewOutputService2ProtocolTest(nil) + + buf := bytes.NewReader([]byte("dmFsdWU=")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + svc := NewOutputService3ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123")) 
+ req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + svc := NewOutputService4ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + svc := NewOutputService5ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + svc := NewOutputService6ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = 
&http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *(*out.Map)["baz"].Foo) + assert.Equal(t, "bar", *(*out.Map)["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + svc := NewOutputService7ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *(*out.Map)["baz"]) + assert.Equal(t, "bar", *(*out.Map)["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + svc := NewOutputService8ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *(*out.Map)["baz"]) + assert.Equal(t, "bar", *(*out.Map)["qux"]) + +} + +func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) { + svc := NewOutputService9ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // 
set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) { + svc := NewOutputService10ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} + +func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) { + svc := NewOutputService11ProtocolTest(nil) + + buf := bytes.NewReader([]byte("")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("x-char", "a") + req.HTTPResponse.Header.Set("x-double", "1.5") + req.HTTPResponse.Header.Set("x-false-bool", "false") + req.HTTPResponse.Header.Set("x-float", "1.5") + req.HTTPResponse.Header.Set("x-int", "1") + req.HTTPResponse.Header.Set("x-long", "100") + req.HTTPResponse.Header.Set("x-str", "string") + req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT") + req.HTTPResponse.Header.Set("x-true-bool", "true") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.5, 
*out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.5, *out.Float) + assert.Equal(t, 1, *out.Integer) + assert.Equal(t, 100, *out.Long) + assert.Equal(t, "string", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..af2ce630b --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/build.go @@ -0,0 +1,262 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok { 
+ tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + member := elemOf(value.Field(i)) + field := t.Field(i) + mTag := field.Tag + + if mTag.Get("location") != "" { // skip non-body members + continue + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + 
return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + fmt.Println(k, v.Interface()) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +func (b *xmlBuilder) buildScalar(value reflect.Value, current 
*XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + str = base64.StdEncoding.EncodeToString(converted) + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..d5b7d71be --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,251 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + 
return err + } + } + } + return nil + } + return nil +} + +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("SDKShapeTraits"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{&XMLNode{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := 
range elems {
			err := parse(member, elem, field.Tag)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// parseList deserializes list members into the slice r, handling both the
// wrapped (<member>) and flattened representations.
func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	t := r.Type()

	if tag.Get("flattened") == "" { // look at all item entries
		mname := "member"
		if name := tag.Get("locationNameList"); name != "" {
			mname = name
		}

		if Children, ok := node.Children[mname]; ok {
			if r.IsNil() {
				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
			}

			for i, c := range Children {
				err := parse(r.Index(i), c, "")
				if err != nil {
					return err
				}
			}
		}
	} else { // flattened list means this is a single element
		if r.IsNil() {
			r.Set(reflect.MakeSlice(t, 0, 0))
		}

		childR := reflect.Zero(t.Elem())
		r.Set(reflect.Append(r, childR))
		err := parse(r.Index(r.Len()-1), node, "")
		if err != nil {
			return err
		}
	}

	return nil
}

// parseMap deserializes map entries into r, allocating the map when needed and
// handling both the <entry>-wrapped and flattened representations.
func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	t := r.Type()
	if r.Kind() == reflect.Ptr {
		t = t.Elem()
		if r.IsNil() {
			r.Set(reflect.New(t))
			r.Elem().Set(reflect.MakeMap(t))
		}

		r = r.Elem()
	}

	if tag.Get("flattened") == "" { // look at all child entries
		for _, entry := range node.Children["entry"] {
			// FIX: error was previously discarded, silently dropping bad entries
			if err := parseMapEntry(r, entry, tag); err != nil {
				return err
			}
		}
	} else { // this element is itself an entry
		if err := parseMapEntry(r, node, tag); err != nil {
			return err
		}
	}

	return nil
}

// parseMapEntry deserializes one key/value pair of a map entry node into r.
func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	kname, vname := "key", "value"
	if n := tag.Get("locationNameKey"); n != "" {
		kname = n
	}
	if n := tag.Get("locationNameValue"); n != "" {
		vname = n
	}

	keys, ok := node.Children[kname]
	values := node.Children[vname]
	if ok {
		for i, key := range keys {
			keyR := reflect.ValueOf(key.Text)
			value := values[i]
			valueR := reflect.New(r.Type().Elem()).Elem()

			// FIX: error was previously discarded, leaving zero values in the map
			if err := parse(valueR, value, ""); err != nil {
				return err
			}
			r.SetMapIndex(keyR, valueR)
		}
	}
	return nil
}

// parseScalar decodes a leaf node's text into the scalar pointed to by r.
func parseScalar(r reflect.Value, node *XMLNode, tag
reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } else { + r.Set(reflect.ValueOf(&t)) + } + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..b6a1cfadd --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,100 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +func XMLToStruct(d *xml.Decoder, s 
*xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k, _ := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4.go new file mode 100644 index 000000000..d331475f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4.go @@ -0,0 +1,296 @@ +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + 
"github.com/awslabs/aws-sdk-go/aws" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + AccessKeyID string + SecretAccessKey string + SessionToken string + Query url.Values + Body io.ReadSeeker + Debug uint + Logger io.Writer + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign requests with signature version 4. +func Sign(req *aws.Request) { + creds, err := req.Service.Config.Credentials.Credentials() + if err != nil { + req.Error = err + return + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: req.Service.ServiceName, + Region: req.Service.Config.Region, + AccessKeyID: creds.AccessKeyID, + SecretAccessKey: creds.SecretAccessKey, + SessionToken: creds.SessionToken, + Debug: req.Service.Config.LogLevel, + Logger: req.Service.Config.Logger, + } + s.sign() + return +} + +func (v4 *signer) sign() { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.SessionToken) + } + + v4.build() + + if v4.Debug > 0 { + out := v4.Logger + fmt.Fprintf(out, "---[ CANONICAL STRING ]-----------------------------\n") + fmt.Fprintln(out, 
v4.canonicalString) + fmt.Fprintf(out, "---[ STRING TO SIGN ]--------------------------------\n") + fmt.Fprintln(out, v4.stringToSign) + fmt.Fprintf(out, "---[ SIGNED URL ]--------------------------------\n") + fmt.Fprintln(out, v4.Request.URL) + fmt.Fprintf(out, "-----------------------------------------------------\n") + } +} + +func (v4 *signer) build() { + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + if v4.isPresign { + v4.buildQuery() // no depends + } + v4.buildCanonicalHeaders() // depends on cred string + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + v4.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.AccessKeyID+"/"+v4.credentialString) + } +} + +func (v4 *signer) buildQuery() { + for k, h := range v4.Request.Header { + if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") { + continue // never hoist x-amz-* headers, they must be signed + } + if _, ok := 
ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // never hoist ignored headers + } + + v4.Request.Header.Del(k) + v4.Query.Del(k) + for _, v := range h { + v4.Query.Add(k, v) + } + } +} + +func (v4 *signer) buildCanonicalHeaders() { + headers := make([]string, 0) + headers = append(headers, "host") + for k, _ := range v4.Request.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = v4.Query.Encode() + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := 
makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + packet := make([]byte, 4096) + hash := sha256.New() + + reader.Seek(0, 0) + for { + n, err := reader.Read(packet) + if n > 0 { + hash.Write(packet[0:n]) + } + if err == io.EOF || n == 0 { + break + } + } + reader.Seek(0, 0) + + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4_test.go new file mode 100644 index 000000000..fb15c3949 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/internal/signer/v4/v4_test.go @@ -0,0 +1,89 @@ +package v4 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer { + endpoint := "https://" + serviceName + "." 
+ region + ".amazonaws.com" + reader := strings.NewReader(body) + req, _ := http.NewRequest("POST", endpoint, reader) + req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" + req.Header.Add("X-Amz-Target", "prefix.Operation") + req.Header.Add("Content-Type", "application/x-amz-json-1.0") + req.Header.Add("Content-Length", string(len(body))) + req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* ()") + + return signer{ + Request: req, + Time: signTime, + ExpireTime: expireTime, + Query: req.URL.Query(), + Body: reader, + ServiceName: serviceName, + Region: region, + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "SESSION", + } +} + +func removeWS(text string) string { + text = strings.Replace(text, " ", "", -1) + text = strings.Replace(text, "\n", "", -1) + text = strings.Replace(text, "\t", "", -1) + return text +} + +func assertEqual(t *testing.T, expected, given string) { + if removeWS(expected) != removeWS(given) { + t.Errorf("\nExpected: %s\nGiven: %s", expected, given) + } +} + +func TestPresignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedHeaders := "host;x-amz-meta-other-header;x-amz-target" + expectedSig := "41c18d68f9191079dfeead4e3f034328f89d86c79f8e9d51dd48bb70eaf623fc" + expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" + + q := signer.Request.URL.Query() + assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, 
SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=0196959cabd964bd10c05217b40ed151882dd394190438bab0c658dafdbff7a1" + + q := signer.Request.Header + assert.Equal(t, expectedSig, q.Get("Authorization")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func BenchmarkPresignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} + +func BenchmarkSignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/api.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/api.go new file mode 100644 index 000000000..8aea62dc6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/api.go @@ -0,0 +1,2738 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53 provides a client for Amazon Route 53. +package route53 + +import ( + "time" + + "github.com/awslabs/aws-sdk-go/aws" +) + +// AssociateVPCWithHostedZoneRequest generates a request for the AssociateVPCWithHostedZone operation. +func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHostedZoneInput) (req *aws.Request, output *AssociateVPCWithHostedZoneOutput) { + if opAssociateVPCWithHostedZone == nil { + opAssociateVPCWithHostedZone = &aws.Operation{ + Name: "AssociateVPCWithHostedZone", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/associatevpc", + } + } + + req = c.newRequest(opAssociateVPCWithHostedZone, input, output) + output = &AssociateVPCWithHostedZoneOutput{} + req.Data = output + return +} + +// This action associates a VPC with an hosted zone. 
+// +// To associate a VPC with an hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted +// zone ID/associatevpc resource. The request body must include an XML document +// with a AssociateVPCWithHostedZoneRequest element. The response returns the +// AssociateVPCWithHostedZoneResponse element that contains ChangeInfo for you +// to track the progress of the AssociateVPCWithHostedZoneRequest you made. +// See GetChange operation for how to track the progress of your change. +func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (output *AssociateVPCWithHostedZoneOutput, err error) { + req, out := c.AssociateVPCWithHostedZoneRequest(input) + output = out + err = req.Send() + return +} + +var opAssociateVPCWithHostedZone *aws.Operation + +// ChangeResourceRecordSetsRequest generates a request for the ChangeResourceRecordSets operation. +func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *aws.Request, output *ChangeResourceRecordSetsOutput) { + if opChangeResourceRecordSets == nil { + opChangeResourceRecordSets = &aws.Operation{ + Name: "ChangeResourceRecordSets", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset/", + } + } + + req = c.newRequest(opChangeResourceRecordSets, input, output) + output = &ChangeResourceRecordSetsOutput{} + req.Data = output + return +} + +// Use this action to create or change your authoritative DNS information. To +// use this action, send a POST request to the 2013-04-01/hostedzone/hosted +// Zone ID/rrset resource. The request body must include an XML document with +// a ChangeResourceRecordSetsRequest element. +// +// Changes are a list of change items and are considered transactional. 
For +// more information on transactional changes, also known as change batches, +// see Creating, Changing, and Deleting Resource Record Sets Using the Route +// 53 API (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RRSchanges.html#RRSchanges_API) +// in the Amazon Route 53 Developer Guide. +// +// Due to the nature of transactional changes, you cannot delete the same resource +// record set more than once in a single change batch. If you attempt to delete +// the same change batch more than once, Route 53 returns an InvalidChangeBatch +// error. In response to a ChangeResourceRecordSets request, your DNS data is +// changed on all Route 53 DNS servers. Initially, the status of a change is +// PENDING. This means the change has not yet propagated to all the authoritative +// Route 53 DNS servers. When the change is propagated to all hosts, the change +// returns a status of INSYNC. +// +// Note the following limitations on a ChangeResourceRecordSets request: +// +// - A request cannot contain more than 100 Change elements. +// +// - A request cannot contain more than 1000 ResourceRecord elements. +// +// The sum of the number of characters (including spaces) in all Value elements +// in a request cannot exceed 32,000 characters. +func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) (output *ChangeResourceRecordSetsOutput, err error) { + req, out := c.ChangeResourceRecordSetsRequest(input) + output = out + err = req.Send() + return +} + +var opChangeResourceRecordSets *aws.Operation + +// ChangeTagsForResourceRequest generates a request for the ChangeTagsForResource operation. 
+func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *aws.Request, output *ChangeTagsForResourceOutput) { + if opChangeTagsForResource == nil { + opChangeTagsForResource = &aws.Operation{ + Name: "ChangeTagsForResource", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + } + + req = c.newRequest(opChangeTagsForResource, input, output) + output = &ChangeTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (output *ChangeTagsForResourceOutput, err error) { + req, out := c.ChangeTagsForResourceRequest(input) + output = out + err = req.Send() + return +} + +var opChangeTagsForResource *aws.Operation + +// CreateHealthCheckRequest generates a request for the CreateHealthCheck operation. +func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *aws.Request, output *CreateHealthCheckOutput) { + if opCreateHealthCheck == nil { + opCreateHealthCheck = &aws.Operation{ + Name: "CreateHealthCheck", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck", + } + } + + req = c.newRequest(opCreateHealthCheck, input, output) + output = &CreateHealthCheckOutput{} + req.Data = output + return +} + +// This action creates a new health check. +// +// To create a new health check, send a POST request to the 2013-04-01/healthcheck +// resource. The request body must include an XML document with a CreateHealthCheckRequest +// element. The response returns the CreateHealthCheckResponse element that +// contains metadata about the health check. +func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (output *CreateHealthCheckOutput, err error) { + req, out := c.CreateHealthCheckRequest(input) + output = out + err = req.Send() + return +} + +var opCreateHealthCheck *aws.Operation + +// CreateHostedZoneRequest generates a request for the CreateHostedZone operation. 
+func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *aws.Request, output *CreateHostedZoneOutput) { + if opCreateHostedZone == nil { + opCreateHostedZone = &aws.Operation{ + Name: "CreateHostedZone", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone", + } + } + + req = c.newRequest(opCreateHostedZone, input, output) + output = &CreateHostedZoneOutput{} + req.Data = output + return +} + +// This action creates a new hosted zone. +// +// To create a new hosted zone, send a POST request to the 2013-04-01/hostedzone +// resource. The request body must include an XML document with a CreateHostedZoneRequest +// element. The response returns the CreateHostedZoneResponse element that contains +// metadata about the hosted zone. +// +// Route 53 automatically creates a default SOA record and four NS records +// for the zone. The NS records in the hosted zone are the name servers you +// give your registrar to delegate your domain to. For more information about +// SOA and NS records, see NS and SOA Records that Route 53 Creates for a Hosted +// Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) +// in the Amazon Route 53 Developer Guide. +// +// When you create a zone, its initial status is PENDING. This means that it +// is not yet available on all DNS servers. The status of the zone changes to +// INSYNC when the NS and SOA records are available on all Route 53 DNS servers. +// +// When trying to create a hosted zone using a reusable delegation set, you +// could specify an optional DelegationSetId, and Route53 would assign those +// 4 NS records for the zone, instead of alloting a new one. 
+func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (output *CreateHostedZoneOutput, err error) { + req, out := c.CreateHostedZoneRequest(input) + output = out + err = req.Send() + return +} + +var opCreateHostedZone *aws.Operation + +// CreateReusableDelegationSetRequest generates a request for the CreateReusableDelegationSet operation. +func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *aws.Request, output *CreateReusableDelegationSetOutput) { + if opCreateReusableDelegationSet == nil { + opCreateReusableDelegationSet = &aws.Operation{ + Name: "CreateReusableDelegationSet", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/delegationset", + } + } + + req = c.newRequest(opCreateReusableDelegationSet, input, output) + output = &CreateReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action creates a reusable delegationSet. +// +// To create a new reusable delegationSet, send a POST request to the 2013-04-01/delegationset +// resource. The request body must include an XML document with a CreateReusableDelegationSetRequest +// element. The response returns the CreateReusableDelegationSetResponse element +// that contains metadata about the delegationSet. +// +// If the optional parameter HostedZoneId is specified, it marks the delegationSet +// associated with that particular hosted zone as reusable. +func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSetInput) (output *CreateReusableDelegationSetOutput, err error) { + req, out := c.CreateReusableDelegationSetRequest(input) + output = out + err = req.Send() + return +} + +var opCreateReusableDelegationSet *aws.Operation + +// DeleteHealthCheckRequest generates a request for the DeleteHealthCheck operation. 
+func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *aws.Request, output *DeleteHealthCheckOutput) { + if opDeleteHealthCheck == nil { + opDeleteHealthCheck = &aws.Operation{ + Name: "DeleteHealthCheck", + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + } + + req = c.newRequest(opDeleteHealthCheck, input, output) + output = &DeleteHealthCheckOutput{} + req.Data = output + return +} + +// This action deletes a health check. To delete a health check, send a DELETE +// request to the 2013-04-01/healthcheck/health check ID resource. +// +// You can delete a health check only if there are no resource record sets +// associated with this health check. If resource record sets are associated +// with this health check, you must disassociate them before you can delete +// your health check. If you try to delete a health check that is associated +// with resource record sets, Route 53 will deny your request with a HealthCheckInUse +// error. For information about disassociating the records from your health +// check, see ChangeResourceRecordSets. +func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (output *DeleteHealthCheckOutput, err error) { + req, out := c.DeleteHealthCheckRequest(input) + output = out + err = req.Send() + return +} + +var opDeleteHealthCheck *aws.Operation + +// DeleteHostedZoneRequest generates a request for the DeleteHostedZone operation. +func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *aws.Request, output *DeleteHostedZoneOutput) { + if opDeleteHostedZone == nil { + opDeleteHostedZone = &aws.Operation{ + Name: "DeleteHostedZone", + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + } + + req = c.newRequest(opDeleteHostedZone, input, output) + output = &DeleteHostedZoneOutput{} + req.Data = output + return +} + +// This action deletes a hosted zone. 
To delete a hosted zone, send a DELETE +// request to the 2013-04-01/hostedzone/hosted zone ID resource. +// +// For more information about deleting a hosted zone, see Deleting a Hosted +// Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// You can delete a hosted zone only if there are no resource record sets +// other than the default SOA record and NS resource record sets. If your hosted +// zone contains other resource record sets, you must delete them before you +// can delete your hosted zone. If you try to delete a hosted zone that contains +// other resource record sets, Route 53 will deny your request with a HostedZoneNotEmpty +// error. For information about deleting records from your hosted zone, see +// ChangeResourceRecordSets. +func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (output *DeleteHostedZoneOutput, err error) { + req, out := c.DeleteHostedZoneRequest(input) + output = out + err = req.Send() + return +} + +var opDeleteHostedZone *aws.Operation + +// DeleteReusableDelegationSetRequest generates a request for the DeleteReusableDelegationSet operation. +func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *aws.Request, output *DeleteReusableDelegationSetOutput) { + if opDeleteReusableDelegationSet == nil { + opDeleteReusableDelegationSet = &aws.Operation{ + Name: "DeleteReusableDelegationSet", + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + } + + req = c.newRequest(opDeleteReusableDelegationSet, input, output) + output = &DeleteReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action deletes a reusable delegation set. To delete a reusable delegation +// set, send a DELETE request to the 2013-04-01/delegationset/delegation set +// ID resource. +// +// You can delete a reusable delegation set only if there are no associated +// hosted zones. 
If your reusable delegation set contains associated hosted +// zones, you must delete them before you can delete your reusable delegation +// set. If you try to delete a reusable delegation set that contains associated +// hosted zones, Route 53 will deny your request with a DelegationSetInUse error. +func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSetInput) (output *DeleteReusableDelegationSetOutput, err error) { + req, out := c.DeleteReusableDelegationSetRequest(input) + output = out + err = req.Send() + return +} + +var opDeleteReusableDelegationSet *aws.Operation + +// DisassociateVPCFromHostedZoneRequest generates a request for the DisassociateVPCFromHostedZone operation. +func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *aws.Request, output *DisassociateVPCFromHostedZoneOutput) { + if opDisassociateVPCFromHostedZone == nil { + opDisassociateVPCFromHostedZone = &aws.Operation{ + Name: "DisassociateVPCFromHostedZone", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/disassociatevpc", + } + } + + req = c.newRequest(opDisassociateVPCFromHostedZone, input, output) + output = &DisassociateVPCFromHostedZoneOutput{} + req.Data = output + return +} + +// This action disassociates a VPC from an hosted zone. +// +// To disassociate a VPC to a hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted +// zone ID/disassociatevpc resource. The request body must include an XML document +// with a DisassociateVPCFromHostedZoneRequest element. The response returns +// the DisassociateVPCFromHostedZoneResponse element that contains ChangeInfo +// for you to track the progress of the DisassociateVPCFromHostedZoneRequest +// you made. See GetChange operation for how to track the progress of your change. 
+func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHostedZoneInput) (output *DisassociateVPCFromHostedZoneOutput, err error) { + req, out := c.DisassociateVPCFromHostedZoneRequest(input) + output = out + err = req.Send() + return +} + +var opDisassociateVPCFromHostedZone *aws.Operation + +// GetChangeRequest generates a request for the GetChange operation. +func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *aws.Request, output *GetChangeOutput) { + if opGetChange == nil { + opGetChange = &aws.Operation{ + Name: "GetChange", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/change/{Id}", + } + } + + req = c.newRequest(opGetChange, input, output) + output = &GetChangeOutput{} + req.Data = output + return +} + +// This action returns the current status of a change batch request. The status +// is one of the following values: +// +// - PENDING indicates that the changes in this request have not replicated +// to all Route 53 DNS servers. This is the initial status of all change batch +// requests. +// +// - INSYNC indicates that the changes have replicated to all Amazon Route +// 53 DNS servers. +func (c *Route53) GetChange(input *GetChangeInput) (output *GetChangeOutput, err error) { + req, out := c.GetChangeRequest(input) + output = out + err = req.Send() + return +} + +var opGetChange *aws.Operation + +// GetCheckerIPRangesRequest generates a request for the GetCheckerIPRanges operation. 
+func (c *Route53) GetCheckerIPRangesRequest(input *GetCheckerIPRangesInput) (req *aws.Request, output *GetCheckerIPRangesOutput) { + if opGetCheckerIPRanges == nil { + opGetCheckerIPRanges = &aws.Operation{ + Name: "GetCheckerIpRanges", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/checkeripranges", + } + } + + req = c.newRequest(opGetCheckerIPRanges, input, output) + output = &GetCheckerIPRangesOutput{} + req.Data = output + return +} + +// To retrieve a list of the IP ranges used by Amazon Route 53 health checkers +// to check the health of your resources, send a GET request to the 2013-04-01/checkeripranges +// resource. You can use these IP addresses to configure router and firewall +// rules to allow health checkers to check the health of your resources. +func (c *Route53) GetCheckerIPRanges(input *GetCheckerIPRangesInput) (output *GetCheckerIPRangesOutput, err error) { + req, out := c.GetCheckerIPRangesRequest(input) + output = out + err = req.Send() + return +} + +var opGetCheckerIPRanges *aws.Operation + +// GetGeoLocationRequest generates a request for the GetGeoLocation operation. +func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *aws.Request, output *GetGeoLocationOutput) { + if opGetGeoLocation == nil { + opGetGeoLocation = &aws.Operation{ + Name: "GetGeoLocation", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocation", + } + } + + req = c.newRequest(opGetGeoLocation, input, output) + output = &GetGeoLocationOutput{} + req.Data = output + return +} + +// To retrieve a single geo location, send a GET request to the 2013-04-01/geolocation +// resource with one of these options: continentcode | countrycode | countrycode +// and subdivisioncode. 
+func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (output *GetGeoLocationOutput, err error) { + req, out := c.GetGeoLocationRequest(input) + output = out + err = req.Send() + return +} + +var opGetGeoLocation *aws.Operation + +// GetHealthCheckRequest generates a request for the GetHealthCheck operation. +func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *aws.Request, output *GetHealthCheckOutput) { + if opGetHealthCheck == nil { + opGetHealthCheck = &aws.Operation{ + Name: "GetHealthCheck", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + } + + req = c.newRequest(opGetHealthCheck, input, output) + output = &GetHealthCheckOutput{} + req.Data = output + return +} + +// To retrieve the health check, send a GET request to the 2013-04-01/healthcheck/health +// check ID resource. +func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (output *GetHealthCheckOutput, err error) { + req, out := c.GetHealthCheckRequest(input) + output = out + err = req.Send() + return +} + +var opGetHealthCheck *aws.Operation + +// GetHealthCheckCountRequest generates a request for the GetHealthCheckCount operation. +func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *aws.Request, output *GetHealthCheckCountOutput) { + if opGetHealthCheckCount == nil { + opGetHealthCheckCount = &aws.Operation{ + Name: "GetHealthCheckCount", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheckcount", + } + } + + req = c.newRequest(opGetHealthCheckCount, input, output) + output = &GetHealthCheckCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your health checks, send a GET request to the +// 2013-04-01/healthcheckcount resource. 
+func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (output *GetHealthCheckCountOutput, err error) { + req, out := c.GetHealthCheckCountRequest(input) + output = out + err = req.Send() + return +} + +var opGetHealthCheckCount *aws.Operation + +// GetHealthCheckLastFailureReasonRequest generates a request for the GetHealthCheckLastFailureReason operation. +func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *aws.Request, output *GetHealthCheckLastFailureReasonOutput) { + if opGetHealthCheckLastFailureReason == nil { + opGetHealthCheckLastFailureReason = &aws.Operation{ + Name: "GetHealthCheckLastFailureReason", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason", + } + } + + req = c.newRequest(opGetHealthCheckLastFailureReason, input, output) + output = &GetHealthCheckLastFailureReasonOutput{} + req.Data = output + return +} + +// If you want to learn why a health check is currently failing or why it failed +// most recently (if at all), you can get the failure reason for the most recent +// failure. Send a GET request to the 2013-04-01/healthcheck/health check ID/lastfailurereason +// resource. +func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailureReasonInput) (output *GetHealthCheckLastFailureReasonOutput, err error) { + req, out := c.GetHealthCheckLastFailureReasonRequest(input) + output = out + err = req.Send() + return +} + +var opGetHealthCheckLastFailureReason *aws.Operation + +// GetHealthCheckStatusRequest generates a request for the GetHealthCheckStatus operation. 
+func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *aws.Request, output *GetHealthCheckStatusOutput) { + if opGetHealthCheckStatus == nil { + opGetHealthCheckStatus = &aws.Operation{ + Name: "GetHealthCheckStatus", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/status", + } + } + + req = c.newRequest(opGetHealthCheckStatus, input, output) + output = &GetHealthCheckStatusOutput{} + req.Data = output + return +} + +// To retrieve the health check status, send a GET request to the 2013-04-01/healthcheck/health +// check ID/status resource. You can use this call to get a health check's current +// status. +func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (output *GetHealthCheckStatusOutput, err error) { + req, out := c.GetHealthCheckStatusRequest(input) + output = out + err = req.Send() + return +} + +var opGetHealthCheckStatus *aws.Operation + +// GetHostedZoneRequest generates a request for the GetHostedZone operation. +func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *aws.Request, output *GetHostedZoneOutput) { + if opGetHostedZone == nil { + opGetHostedZone = &aws.Operation{ + Name: "GetHostedZone", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + } + + req = c.newRequest(opGetHostedZone, input, output) + output = &GetHostedZoneOutput{} + req.Data = output + return +} + +// To retrieve the delegation set for a hosted zone, send a GET request to the +// 2013-04-01/hostedzone/hosted zone ID resource. The delegation set is the +// four Route 53 name servers that were assigned to the hosted zone when you +// created it. +func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (output *GetHostedZoneOutput, err error) { + req, out := c.GetHostedZoneRequest(input) + output = out + err = req.Send() + return +} + +var opGetHostedZone *aws.Operation + +// GetHostedZoneCountRequest generates a request for the GetHostedZoneCount operation. 
+func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *aws.Request, output *GetHostedZoneCountOutput) { + if opGetHostedZoneCount == nil { + opGetHostedZoneCount = &aws.Operation{ + Name: "GetHostedZoneCount", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonecount", + } + } + + req = c.newRequest(opGetHostedZoneCount, input, output) + output = &GetHostedZoneCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount +// resource. +func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (output *GetHostedZoneCountOutput, err error) { + req, out := c.GetHostedZoneCountRequest(input) + output = out + err = req.Send() + return +} + +var opGetHostedZoneCount *aws.Operation + +// GetReusableDelegationSetRequest generates a request for the GetReusableDelegationSet operation. +func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *aws.Request, output *GetReusableDelegationSetOutput) { + if opGetReusableDelegationSet == nil { + opGetReusableDelegationSet = &aws.Operation{ + Name: "GetReusableDelegationSet", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + } + + req = c.newRequest(opGetReusableDelegationSet, input, output) + output = &GetReusableDelegationSetOutput{} + req.Data = output + return +} + +// To retrieve the reusable delegation set, send a GET request to the 2013-04-01/delegationset/delegation +// set ID resource. +func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) (output *GetReusableDelegationSetOutput, err error) { + req, out := c.GetReusableDelegationSetRequest(input) + output = out + err = req.Send() + return +} + +var opGetReusableDelegationSet *aws.Operation + +// ListGeoLocationsRequest generates a request for the ListGeoLocations operation. 
+func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *aws.Request, output *ListGeoLocationsOutput) { + if opListGeoLocations == nil { + opListGeoLocations = &aws.Operation{ + Name: "ListGeoLocations", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocations", + } + } + + req = c.newRequest(opListGeoLocations, input, output) + output = &ListGeoLocationsOutput{} + req.Data = output + return +} + +// To retrieve a list of supported geo locations, send a GET request to the +// 2013-04-01/geolocations resource. The response to this request includes a +// GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails +// child elements. The list is sorted by country code, and then subdivision +// code, followed by continents at the end of the list. +// +// By default, the list of geo locations is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. If the list is truncated, IsTruncated will be set to true and +// a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode +// will be populated. You can pass these as parameters to StartContinentCode, +// StartCountryCode, StartSubdivisionCode to control the geo location that the +// list begins with. +func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (output *ListGeoLocationsOutput, err error) { + req, out := c.ListGeoLocationsRequest(input) + output = out + err = req.Send() + return +} + +var opListGeoLocations *aws.Operation + +// ListHealthChecksRequest generates a request for the ListHealthChecks operation. 
+func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *aws.Request, output *ListHealthChecksOutput) { + if opListHealthChecks == nil { + opListHealthChecks = &aws.Operation{ + Name: "ListHealthChecks", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck", + } + } + + req = c.newRequest(opListHealthChecks, input, output) + output = &ListHealthChecksOutput{} + req.Data = output + return +} + +// To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck +// resource. The response to this request includes a HealthChecks element with +// zero, one, or multiple HealthCheck child elements. By default, the list of +// health checks is displayed on a single page. You can control the length of +// the page that is displayed by using the MaxItems parameter. You can use the +// Marker parameter to control the health check that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (output *ListHealthChecksOutput, err error) { + req, out := c.ListHealthChecksRequest(input) + output = out + err = req.Send() + return +} + +var opListHealthChecks *aws.Operation + +// ListHostedZonesRequest generates a request for the ListHostedZones operation. +func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *aws.Request, output *ListHostedZonesOutput) { + if opListHostedZones == nil { + opListHostedZones = &aws.Operation{ + Name: "ListHostedZones", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone", + } + } + + req = c.newRequest(opListHostedZones, input, output) + output = &ListHostedZonesOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone +// resource. 
The response to this request includes a HostedZones element with +// zero, one, or multiple HostedZone child elements. By default, the list of +// hosted zones is displayed on a single page. You can control the length of +// the page that is displayed by using the MaxItems parameter. You can use the +// Marker parameter to control the hosted zone that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (output *ListHostedZonesOutput, err error) { + req, out := c.ListHostedZonesRequest(input) + output = out + err = req.Send() + return +} + +var opListHostedZones *aws.Operation + +// ListHostedZonesByNameRequest generates a request for the ListHostedZonesByName operation. +func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *aws.Request, output *ListHostedZonesByNameOutput) { + if opListHostedZonesByName == nil { + opListHostedZonesByName = &aws.Operation{ + Name: "ListHostedZonesByName", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyname", + } + } + + req = c.newRequest(opListHostedZonesByName, input, output) + output = &ListHostedZonesByNameOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the 2013-04-01/hostedzonesbyname resource. The response to this +// request includes a HostedZones element with zero or more HostedZone child +// elements lexicographically ordered by DNS name. By default, the list of hosted +// zones is displayed on a single page. You can control the length of the page +// that is displayed by using the MaxItems parameter. You can use the DNSName +// and HostedZoneId parameters to control the hosted zone that the list begins +// with. +// +// Amazon Route 53 returns a maximum of 100 items. 
If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (output *ListHostedZonesByNameOutput, err error) { + req, out := c.ListHostedZonesByNameRequest(input) + output = out + err = req.Send() + return +} + +var opListHostedZonesByName *aws.Operation + +// ListResourceRecordSetsRequest generates a request for the ListResourceRecordSets operation. +func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *aws.Request, output *ListResourceRecordSetsOutput) { + if opListResourceRecordSets == nil { + opListResourceRecordSets = &aws.Operation{ + Name: "ListResourceRecordSets", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset", + } + } + + req = c.newRequest(opListResourceRecordSets, input, output) + output = &ListResourceRecordSetsOutput{} + req.Data = output + return +} + +// Imagine all the resource record sets in a zone listed out in front of you. +// Imagine them sorted lexicographically first by DNS name (with the labels +// reversed, like "com.amazon.www" for example), and secondarily, lexicographically +// by record type. This operation retrieves at most MaxItems resource record +// sets from this list, in order, starting at a position specified by the Name +// and Type arguments: +// +// If both Name and Type are omitted, this means start the results at the +// first RRSET in the HostedZone. If Name is specified but Type is omitted, +// this means start the results at the first RRSET in the list whose name is +// greater than or equal to Name. If both Name and Type are specified, this +// means start the results at the first RRSET in the list whose name is greater +// than or equal to Name and whose type is greater than or equal to Type. It +// is an error to specify the Type but not the Name. 
Use ListResourceRecordSets +// to retrieve a single known record set by specifying the record set's name +// and type, and setting MaxItems = 1 +// +// To retrieve all the records in a HostedZone, first pause any processes making +// calls to ChangeResourceRecordSets. Initially call ListResourceRecordSets +// without a Name and Type to get the first page of record sets. For subsequent +// calls, set Name and Type to the NextName and NextType values returned by +// the previous response. +// +// In the presence of concurrent ChangeResourceRecordSets calls, there is no +// consistency of results across calls to ListResourceRecordSets. The only way +// to get a consistent multi-page snapshot of all RRSETs in a zone is to stop +// making changes while pagination is in progress. +// +// However, the results from ListResourceRecordSets are consistent within a +// page. If MakeChange calls are taking place concurrently, the result of each +// one will either be completely visible in your results or not at all. You +// will not see partial changes, or changes that do not ultimately succeed. +// (This follows from the fact that MakeChange is atomic) +// +// The results from ListResourceRecordSets are strongly consistent with ChangeResourceRecordSets. +// To be precise, if a single process makes a call to ChangeResourceRecordSets +// and receives a successful response, the effects of that change will be visible +// in a subsequent call to ListResourceRecordSets by that process. +func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (output *ListResourceRecordSetsOutput, err error) { + req, out := c.ListResourceRecordSetsRequest(input) + output = out + err = req.Send() + return +} + +var opListResourceRecordSets *aws.Operation + +// ListReusableDelegationSetsRequest generates a request for the ListReusableDelegationSets operation. 
+func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *aws.Request, output *ListReusableDelegationSetsOutput) { + if opListReusableDelegationSets == nil { + opListReusableDelegationSets = &aws.Operation{ + Name: "ListReusableDelegationSets", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset", + } + } + + req = c.newRequest(opListReusableDelegationSets, input, output) + output = &ListReusableDelegationSetsOutput{} + req.Data = output + return +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the 2013-04-01/delegationset resource. The response to this request includes +// a DelegationSets element with zero, one, or multiple DelegationSet child +// elements. By default, the list of delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsInput) (output *ListReusableDelegationSetsOutput, err error) { + req, out := c.ListReusableDelegationSetsRequest(input) + output = out + err = req.Send() + return +} + +var opListReusableDelegationSets *aws.Operation + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. 
+func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *aws.Request, output *ListTagsForResourceOutput) { + if opListTagsForResource == nil { + opListTagsForResource = &aws.Operation{ + Name: "ListTagsForResource", + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + } + + req = c.newRequest(opListTagsForResource, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (output *ListTagsForResourceOutput, err error) { + req, out := c.ListTagsForResourceRequest(input) + output = out + err = req.Send() + return +} + +var opListTagsForResource *aws.Operation + +// ListTagsForResourcesRequest generates a request for the ListTagsForResources operation. +func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *aws.Request, output *ListTagsForResourcesOutput) { + if opListTagsForResources == nil { + opListTagsForResources = &aws.Operation{ + Name: "ListTagsForResources", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}", + } + } + + req = c.newRequest(opListTagsForResources, input, output) + output = &ListTagsForResourcesOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (output *ListTagsForResourcesOutput, err error) { + req, out := c.ListTagsForResourcesRequest(input) + output = out + err = req.Send() + return +} + +var opListTagsForResources *aws.Operation + +// UpdateHealthCheckRequest generates a request for the UpdateHealthCheck operation. 
+func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *aws.Request, output *UpdateHealthCheckOutput) { + if opUpdateHealthCheck == nil { + opUpdateHealthCheck = &aws.Operation{ + Name: "UpdateHealthCheck", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + } + + req = c.newRequest(opUpdateHealthCheck, input, output) + output = &UpdateHealthCheckOutput{} + req.Data = output + return +} + +// This action updates an existing health check. +// +// To update a health check, send a POST request to the 2013-04-01/healthcheck/health +// check ID resource. The request body must include an XML document with an +// UpdateHealthCheckRequest element. The response returns an UpdateHealthCheckResponse +// element, which contains metadata about the health check. +func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (output *UpdateHealthCheckOutput, err error) { + req, out := c.UpdateHealthCheckRequest(input) + output = out + err = req.Send() + return +} + +var opUpdateHealthCheck *aws.Operation + +// UpdateHostedZoneCommentRequest generates a request for the UpdateHostedZoneComment operation. +func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *aws.Request, output *UpdateHostedZoneCommentOutput) { + if opUpdateHostedZoneComment == nil { + opUpdateHostedZoneComment = &aws.Operation{ + Name: "UpdateHostedZoneComment", + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + } + + req = c.newRequest(opUpdateHostedZoneComment, input, output) + output = &UpdateHostedZoneCommentOutput{} + req.Data = output + return +} + +// To update the hosted zone comment, send a POST request to the 2013-04-01/hostedzone/hosted +// zone ID resource. The request body must include an XML document with a UpdateHostedZoneCommentRequest +// element. The response to this request includes the modified HostedZone element. +// +// The comment can have a maximum length of 256 characters. 
+func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) (output *UpdateHostedZoneCommentOutput, err error) { + req, out := c.UpdateHostedZoneCommentRequest(input) + output = out + err = req.Send() + return +} + +var opUpdateHostedZoneComment *aws.Operation + +// Alias resource record sets only: Information about the domain to which you +// are redirecting traffic. +// +// For more information and an example, see Creating Alias Resource Record +// Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingAliasRRSets.html) +// in the Amazon Route 53 Developer Guide +// +// . +type AliasTarget struct { + // Alias resource record sets only: The external DNS name associated with the + // AWS Resource. + // + // For more information and an example, see Creating Alias Resource Record + // Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingAliasRRSets.html) + // in the Amazon Route 53 Developer Guide + // + // . + DNSName *string `type:"string" required:"true"` + + // Alias resource record sets only: A boolean value that indicates whether this + // Resource Record Set should respect the health status of any health checks + // associated with the ALIAS target record which it is linked to. + // + // For more information and an example, see Creating Alias Resource Record + // Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingAliasRRSets.html) + // in the Amazon Route 53 Developer Guide + // + // . + EvaluateTargetHealth *bool `type:"boolean" required:"true"` + + // Alias resource record sets only: The value of the hosted zone ID for the + // AWS resource. + // + // For more information and an example, see Creating Alias Resource Record + // Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingAliasRRSets.html) + // in the Amazon Route 53 Developer Guide + // + // . 
+ HostedZoneID *string `locationName:"HostedZoneId" type:"string" required:"true"` + + metadataAliasTarget `json:"-", xml:"-"` +} + +type metadataAliasTarget struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to associate a +// VPC with an hosted zone. +type AssociateVPCWithHostedZoneInput struct { + // Optional: Any comments you want to include about a AssociateVPCWithHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to associate your VPC with. + // + // Note that you cannot associate a VPC with a hosted zone that doesn't have + // an existing VPC association. + HostedZoneID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. + VPC *VPC `type:"structure" required:"true"` + + metadataAssociateVPCWithHostedZoneInput `json:"-", xml:"-"` +} + +type metadataAssociateVPCWithHostedZoneInput struct { + SDKShapeTraits bool `locationName:"AssociateVPCWithHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing the response information for the request. +type AssociateVPCWithHostedZoneOutput struct { + // A complex type that contains the ID, the status, and the date and time of + // your AssociateVPCWithHostedZoneRequest. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + metadataAssociateVPCWithHostedZoneOutput `json:"-", xml:"-"` +} + +type metadataAssociateVPCWithHostedZoneOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the information for each change in a change +// batch request. +type Change struct { + // The action to perform. + // + // Valid values: CREATE | DELETE | UPSERT + Action *string `type:"string" required:"true"` + + // Information about the resource record set to create or delete. 
+ ResourceRecordSet *ResourceRecordSet `type:"structure" required:"true"` + + metadataChange `json:"-", xml:"-"` +} + +type metadataChange struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains an optional comment and the changes that you +// want to make with a change batch request. +type ChangeBatch struct { + // A complex type that contains one Change element for each resource record + // set that you want to create or delete. + Changes []*Change `locationNameList:"Change" type:"list" required:"true"` + + // Optional: Any comments you want to include about a change batch request. + Comment *string `type:"string"` + + metadataChangeBatch `json:"-", xml:"-"` +} + +type metadataChangeBatch struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that describes change information about changes made to your +// hosted zone. +// +// This element contains an ID that you use when performing a GetChange action +// to get detailed information about the change. +type ChangeInfo struct { + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + ID *string `locationName:"Id" type:"string" required:"true"` + + // The current state of the request. PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). 
+ // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC), which is synonymous with Greenwich Mean Time in this context. + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + metadataChangeInfo `json:"-", xml:"-"` +} + +type metadataChangeInfo struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains a change batch. +type ChangeResourceRecordSetsInput struct { + // A complex type that contains an optional comment and the Changes element. + ChangeBatch *ChangeBatch `type:"structure" required:"true"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to change. + HostedZoneID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataChangeResourceRecordSetsInput `json:"-", xml:"-"` +} + +type metadataChangeResourceRecordSetsInput struct { + SDKShapeTraits bool `locationName:"ChangeResourceRecordSetsRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing the response for the request. +type ChangeResourceRecordSetsOutput struct { + // A complex type that contains information about changes made to your hosted + // zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + metadataChangeResourceRecordSetsOutput `json:"-", xml:"-"` +} + +type metadataChangeResourceRecordSetsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing information about a request to add, change, or +// delete the tags that are associated with a resource. +type ChangeTagsForResourceInput struct { + // A complex type that contains a list of Tag elements. Each Tag element identifies + // a tag that you want to add or update for the specified resource. 
+ AddTags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of Tag keys that you want to remove from the specified resource. + RemoveTagKeys []*string `locationNameList:"Key" type:"list"` + + // The ID of the resource for which you want to add, change, or delete tags. + ResourceID *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true"` + + metadataChangeTagsForResourceInput `json:"-", xml:"-"` +} + +type metadataChangeTagsForResourceInput struct { + SDKShapeTraits bool `locationName:"ChangeTagsForResourceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// Empty response for the request. +type ChangeTagsForResourceOutput struct { + metadataChangeTagsForResourceOutput `json:"-", xml:"-"` +} + +type metadataChangeTagsForResourceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// >A complex type that contains information about the request to create a health +// check. +type CreateHealthCheckInput struct { + // A unique string that identifies the request and that allows failed CreateHealthCheck + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a health + // check. CallerReference can be any unique string; you might choose to use + // a string that identifies your project. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `type:"string" required:"true"` + + // A complex type that contains health check configuration. 
+ HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` + + metadataCreateHealthCheckInput `json:"-", xml:"-"` +} + +type metadataCreateHealthCheckInput struct { + SDKShapeTraits bool `locationName:"CreateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing the response information for the new health check. +type CreateHealthCheckOutput struct { + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The unique URL representing the new health check. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + metadataCreateHealthCheckOutput `json:"-", xml:"-"` +} + +type metadataCreateHealthCheckOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to create a hosted +// zone. +type CreateHostedZoneInput struct { + // A unique string that identifies the request and that allows failed CreateHostedZone + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a hosted + // zone. CallerReference can be any unique string; you might choose to use a + // string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `type:"string" required:"true"` + + // The delegation set id of the reusable delgation set whose NS records you + // want to assign to the new hosted zone. + DelegationSetID *string `locationName:"DelegationSetId" type:"string"` + + // A complex type that contains an optional comment about your hosted zone. + HostedZoneConfig *HostedZoneConfig `type:"structure"` + + // The name of the domain. 
This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Route 53 assumes that the + // domain name is fully qualified. This means that Route 53 treats www.example.com + // (without a trailing dot) and www.example.com. (with a trailing dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. By providing + // this parameter, your newly created hosted cannot be resolved anywhere other + // than the given VPC. + VPC *VPC `type:"structure"` + + metadataCreateHostedZoneInput `json:"-", xml:"-"` +} + +type metadataCreateHostedZoneInput struct { + SDKShapeTraits bool `locationName:"CreateHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing the response information for the new hosted zone. +type CreateHostedZoneOutput struct { + // A complex type that contains information about the request to create a hosted + // zone. This includes an ID that you use when you call the GetChange action + // to get the current status of the change request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // A complex type that contains identifying information about the hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // The unique URL representing the new hosted zone. 
+ Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + VPC *VPC `type:"structure"` + + metadataCreateHostedZoneOutput `json:"-", xml:"-"` +} + +type metadataCreateHostedZoneOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type CreateReusableDelegationSetInput struct { + // A unique string that identifies the request and that allows failed CreateReusableDelegationSet + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a reusable + // delegation set. CallerReference can be any unique string; you might choose + // to use a string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `type:"string" required:"true"` + + // The ID of the hosted zone whose delegation set you want to mark as reusable. + // It is an optional parameter. + HostedZoneID *string `locationName:"HostedZoneId" type:"string"` + + metadataCreateReusableDelegationSetInput `json:"-", xml:"-"` +} + +type metadataCreateReusableDelegationSetInput struct { + SDKShapeTraits bool `locationName:"CreateReusableDelegationSetRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +type CreateReusableDelegationSetOutput struct { + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // The unique URL representing the new reusbale delegation set. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + metadataCreateReusableDelegationSetOutput `json:"-", xml:"-"` +} + +type metadataCreateReusableDelegationSetOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains name server information. 
+type DelegationSet struct { + CallerReference *string `type:"string"` + + ID *string `locationName:"Id" type:"string"` + + // A complex type that contains the authoritative name servers for the hosted + // zone. Use the method provided by your domain registrar to add an NS record + // to your domain for each NameServer that is assigned to your hosted zone. + NameServers []*string `locationNameList:"NameServer" type:"list" required:"true"` + + metadataDelegationSet `json:"-", xml:"-"` +} + +type metadataDelegationSet struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing the request information for delete health check. +type DeleteHealthCheckInput struct { + // The ID of the health check to delete. + HealthCheckID *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + metadataDeleteHealthCheckInput `json:"-", xml:"-"` +} + +type metadataDeleteHealthCheckInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// Empty response for the request. +type DeleteHealthCheckOutput struct { + metadataDeleteHealthCheckOutput `json:"-", xml:"-"` +} + +type metadataDeleteHealthCheckOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the hosted zone that you want +// to delete. +type DeleteHostedZoneInput struct { + // The ID of the hosted zone you want to delete. + ID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataDeleteHostedZoneInput `json:"-", xml:"-"` +} + +type metadataDeleteHostedZoneInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing the response information for the request. +type DeleteHostedZoneOutput struct { + // A complex type that contains the ID, the status, and the date and time of + // your delete request. 
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + metadataDeleteHostedZoneOutput `json:"-", xml:"-"` +} + +type metadataDeleteHostedZoneOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing the information for the delete request. +type DeleteReusableDelegationSetInput struct { + // The ID of the reusable delegation set you want to delete. + ID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataDeleteReusableDelegationSetInput `json:"-", xml:"-"` +} + +type metadataDeleteReusableDelegationSetInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// Empty response for the request. +type DeleteReusableDelegationSetOutput struct { + metadataDeleteReusableDelegationSetOutput `json:"-", xml:"-"` +} + +type metadataDeleteReusableDelegationSetOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to disassociate +// a VPC from an hosted zone. +type DisassociateVPCFromHostedZoneInput struct { + // Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to disassociate your VPC from. + // + // Note that you cannot disassociate the last VPC from a hosted zone. + HostedZoneID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be disassociated from. + VPC *VPC `type:"structure" required:"true"` + + metadataDisassociateVPCFromHostedZoneInput `json:"-", xml:"-"` +} + +type metadataDisassociateVPCFromHostedZoneInput struct { + SDKShapeTraits bool `locationName:"DisassociateVPCFromHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing the response information for the request. 
+type DisassociateVPCFromHostedZoneOutput struct { + // A complex type that contains the ID, the status, and the date and time of + // your DisassociateVPCFromHostedZoneRequest. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + metadataDisassociateVPCFromHostedZoneOutput `json:"-", xml:"-"` +} + +type metadataDisassociateVPCFromHostedZoneOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about a geo location. +type GeoLocation struct { + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `type:"string"` + + metadataGeoLocation `json:"-", xml:"-"` +} + +type metadataGeoLocation struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about a GeoLocation. +type GeoLocationDetails struct { + // The code for a continent geo location. Note: only continent locations have + // a continent code. + ContinentCode *string `type:"string"` + + // The name of the continent. This element is only present if ContinentCode + // is also present. 
+ ContinentName *string `type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `type:"string"` + + // The name of the country. This element is only present if CountryCode is also + // present. + CountryName *string `type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + SubdivisionCode *string `type:"string"` + + // The name of the subdivision. This element is only present if SubdivisionCode + // is also present. + SubdivisionName *string `type:"string"` + + metadataGeoLocationDetails `json:"-", xml:"-"` +} + +type metadataGeoLocationDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// The input for a GetChange request. +type GetChangeInput struct { + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + ID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataGetChangeInput `json:"-", xml:"-"` +} + +type metadataGetChangeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the ChangeInfo element. +type GetChangeOutput struct { + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the date and + // time of the request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + metadataGetChangeOutput `json:"-", xml:"-"` +} + +type metadataGetChangeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// Empty request. 
+type GetCheckerIPRangesInput struct { + metadataGetCheckerIPRangesInput `json:"-", xml:"-"` +} + +type metadataGetCheckerIPRangesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the CheckerIpRanges element. +type GetCheckerIPRangesOutput struct { + // A complex type that contains sorted list of IP ranges in CIDR format for + // Amazon Route 53 health checkers. + CheckerIPRanges []*string `locationName:"CheckerIpRanges" type:"list" required:"true"` + + metadataGetCheckerIPRangesOutput `json:"-", xml:"-"` +} + +type metadataGetCheckerIPRangesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to get a geo location. +type GetGeoLocationInput struct { + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `location:"querystring" locationName:"continentcode" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `location:"querystring" locationName:"countrycode" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. 
+ SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" type:"string"` + + metadataGetGeoLocationInput `json:"-", xml:"-"` +} + +type metadataGetGeoLocationInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing information about the specified geo location. +type GetGeoLocationOutput struct { + // A complex type that contains the information about the specified geo location. + GeoLocationDetails *GeoLocationDetails `type:"structure" required:"true"` + + metadataGetGeoLocationOutput `json:"-", xml:"-"` +} + +type metadataGetGeoLocationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// To retrieve a count of all your health checks, send a GET request to the +// 2013-04-01/healthcheckcount resource. +type GetHealthCheckCountInput struct { + metadataGetHealthCheckCountInput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckCountInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the count of health checks associated with the +// current AWS account. +type GetHealthCheckCountOutput struct { + // The number of health checks associated with the current AWS account. + HealthCheckCount *int64 `type:"long" required:"true"` + + metadataGetHealthCheckCountOutput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckCountOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to get a health +// check. +type GetHealthCheckInput struct { + // The ID of the health check to retrieve. + HealthCheckID *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + metadataGetHealthCheckInput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to get the most +// recent failure reason for a health check. 
+type GetHealthCheckLastFailureReasonInput struct { + // The ID of the health check for which you want to retrieve the reason for + // the most recent failure. + HealthCheckID *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + metadataGetHealthCheckLastFailureReasonInput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckLastFailureReasonInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the most recent failure for +// the specified health check. +type GetHealthCheckLastFailureReasonOutput struct { + // A list that contains one HealthCheckObservation element for each Route 53 + // health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` + + metadataGetHealthCheckLastFailureReasonOutput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckLastFailureReasonOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing information about the specified health check. +type GetHealthCheckOutput struct { + // A complex type that contains the information about the specified health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + metadataGetHealthCheckOutput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to get health +// check status for a health check. +type GetHealthCheckStatusInput struct { + // The ID of the health check for which you want to retrieve the most recent + // status. 
+ HealthCheckID *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + metadataGetHealthCheckStatusInput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckStatusInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the status of the specified +// health check. +type GetHealthCheckStatusOutput struct { + // A list that contains one HealthCheckObservation element for each Route 53 + // health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` + + metadataGetHealthCheckStatusOutput `json:"-", xml:"-"` +} + +type metadataGetHealthCheckStatusOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount +// resource. +type GetHostedZoneCountInput struct { + metadataGetHostedZoneCountInput `json:"-", xml:"-"` +} + +type metadataGetHostedZoneCountInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the count of hosted zones associated with the +// current AWS account. +type GetHostedZoneCountOutput struct { + // The number of hosted zones associated with the current AWS account. + HostedZoneCount *int64 `type:"long" required:"true"` + + metadataGetHostedZoneCountOutput `json:"-", xml:"-"` +} + +type metadataGetHostedZoneCountOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// The input for a GetHostedZone request. +type GetHostedZoneInput struct { + // The ID of the hosted zone for which you want to get a list of the name servers + // in the delegation set. 
+ ID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataGetHostedZoneInput `json:"-", xml:"-"` +} + +type metadataGetHostedZoneInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing information about the specified hosted zone. +type GetHostedZoneOutput struct { + // A complex type that contains information about the name servers for the specified + // hosted zone. + DelegationSet *DelegationSet `type:"structure"` + + // A complex type that contains the information about the specified hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // A complex type that contains information about VPCs associated with the specified + // hosted zone. + VPCs []*VPC `locationNameList:"VPC" type:"list"` + + metadataGetHostedZoneOutput `json:"-", xml:"-"` +} + +type metadataGetHostedZoneOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// The input for a GetReusableDelegationSet request. +type GetReusableDelegationSetInput struct { + // The ID of the reusable delegation set for which you want to get a list of + // the name server. + ID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataGetReusableDelegationSetInput `json:"-", xml:"-"` +} + +type metadataGetReusableDelegationSetInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing information about the specified reusable delegation +// set. +type GetReusableDelegationSetOutput struct { + // A complex type that contains the information about the nameservers for the + // specified delegation set ID. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + metadataGetReusableDelegationSetOutput `json:"-", xml:"-"` +} + +type metadataGetReusableDelegationSetOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains identifying information about the health check. 
+type HealthCheck struct { + // A unique string that identifies the request to create the health check. + CallerReference *string `type:"string" required:"true"` + + // A complex type that contains the health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` + + // The version of the health check. You can optionally pass this value in a + // call to UpdateHealthCheck to prevent overwriting another change to the health + // check. + HealthCheckVersion *int64 `type:"long" required:"true"` + + // The ID of the specified health check. + ID *string `locationName:"Id" type:"string" required:"true"` + + metadataHealthCheck `json:"-", xml:"-"` +} + +type metadataHealthCheck struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the health check configuration. +type HealthCheckConfig struct { + // The number of consecutive health checks that an endpoint must pass or fail + // for Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + FailureThreshold *int64 `type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + FullyQualifiedDomainName *string `type:"string"` + + // IP Address of the instance being checked. + IPAddress *string `type:"string"` + + // Port on which connection will be opened to the instance to health check. + // For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is not specified. + // For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port is not specified. + Port *int64 `type:"integer"` + + // The number of seconds between the time that Route 53 gets a response from + // your endpoint and the time that it sends the next health-check request. 
+ // + // Each Route 53 health checker makes requests at this interval. Valid values + // are 10 and 30. The default value is 30. + RequestInterval *int64 `type:"integer"` + + // Path to ping on the instance to check the health. Required for HTTP, HTTPS, + // HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks, HTTP request is issued + // to the instance on the given port and path. + ResourcePath *string `type:"string"` + + // A string to search for in the body of a health check response. Required for + // HTTP_STR_MATCH and HTTPS_STR_MATCH health checks. + SearchString *string `type:"string"` + + // The type of health check to be performed. Currently supported types are TCP, + // HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH. + Type *string `type:"string" required:"true"` + + metadataHealthCheckConfig `json:"-", xml:"-"` +} + +type metadataHealthCheckConfig struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the IP address of a Route 53 health checker +// and the reason for the health check status. +type HealthCheckObservation struct { + // The IP address of the Route 53 health checker that performed the health check. + IPAddress *string `type:"string"` + + // A complex type that contains information about the health check status for + // the current observation. + StatusReport *StatusReport `type:"structure"` + + metadataHealthCheckObservation `json:"-", xml:"-"` +} + +type metadataHealthCheckObservation struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contain information about the specified hosted zone. +type HostedZone struct { + // A unique string that identifies the request to create the hosted zone. + CallerReference *string `type:"string" required:"true"` + + // A complex type that contains the Comment element. + Config *HostedZoneConfig `type:"structure"` + + // The ID of the specified hosted zone. + ID *string `locationName:"Id" type:"string" required:"true"` + + // The name of the domain. 
This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Route 53 assumes that the + // domain name is fully qualified. This means that Route 53 treats www.example.com + // (without a trailing dot) and www.example.com. (with a trailing dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // Total number of resource record sets in the hosted zone. + ResourceRecordSetCount *int64 `type:"long"` + + metadataHostedZone `json:"-", xml:"-"` +} + +type metadataHostedZone struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains an optional comment about your hosted zone. +// If you don't want to specify a comment, you can omit the HostedZoneConfig +// and Comment elements from the XML document. +type HostedZoneConfig struct { + // An optional comment about your hosted zone. If you don't want to specify + // a comment, you can omit the HostedZoneConfig and Comment elements from the + // XML document. + Comment *string `type:"string"` + + // A value that indicates whether this is a private hosted zone. The value is + // returned in the response; do not specify it in the request. + PrivateZone *bool `type:"boolean"` + + metadataHostedZoneConfig `json:"-", xml:"-"` +} + +type metadataHostedZoneConfig struct { + SDKShapeTraits bool `type:"structure"` +} + +// The input for a ListGeoLocations request. +type ListGeoLocationsInput struct { + // The maximum number of geo locations you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The first continent code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. 
For non-continent geo locations, + // this should be null. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + StartContinentCode *string `location:"querystring" locationName:"startcontinentcode" type:"string"` + + // The first country code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + StartCountryCode *string `location:"querystring" locationName:"startcountrycode" type:"string"` + + // The first subdivision code in the lexicographic ordering of geo locations + // that you want the ListGeoLocations request to list. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" type:"string"` + + metadataListGeoLocationsInput `json:"-", xml:"-"` +} + +type metadataListGeoLocationsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the geo locations that are +// returned by the request and information about the response. +type ListGeoLocationsOutput struct { + // A complex type that contains information about the geo locations that are + // returned by the request. + GeoLocationDetailsList []*GeoLocationDetails `locationNameList:"GeoLocationDetails" type:"list" required:"true"` + + // A flag that indicates whether there are more geo locations to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the values included in the ListGeoLocationsResponse$NextContinentCode, + // ListGeoLocationsResponse$NextCountryCode and ListGeoLocationsResponse$NextSubdivisionCode + // elements. 
+ // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // If the results were truncated, the continent code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is a continent location. + NextContinentCode *string `type:"string"` + + // If the results were truncated, the country code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is not a continent location. + NextCountryCode *string `type:"string"` + + // If the results were truncated, the subdivision code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location has a subdivision. + NextSubdivisionCode *string `type:"string"` + + metadataListGeoLocationsOutput `json:"-", xml:"-"` +} + +type metadataListGeoLocationsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck +// resource. The response to this request includes a HealthChecks element with +// zero or more HealthCheck child elements. By default, the list of health checks +// is displayed on a single page. You can control the length of the page that +// is displayed by using the MaxItems parameter. You can use the Marker parameter +// to control the health check that the list begins with. +// +// Route 53 returns a maximum of 100 items. If you set MaxItems to a value +// greater than 100, Route 53 returns only the first 100. 
+type ListHealthChecksInput struct { + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of health checks to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + metadataListHealthChecksInput `json:"-", xml:"-"` +} + +type metadataListHealthChecksInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the response for the request. +type ListHealthChecksOutput struct { + // A complex type that contains information about the health checks associated + // with the current AWS account. + HealthChecks []*HealthCheck `locationNameList:"HealthCheck" type:"list" required:"true"` + + // A flag indicating whether there are more health checks to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of health checks to be included in the response body. + // If the number of health checks associated with this AWS account exceeds MaxItems, + // the value of ListHealthChecksResponse$IsTruncated in the response is true. + // Call ListHealthChecks again and specify the value of ListHealthChecksResponse$NextMarker + // in the ListHostedZonesRequest$Marker element to get the next page of results. 
+ MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing health checks. If ListHealthChecksResponse$IsTruncated + // is true, make another request to ListHealthChecks and include the value of + // the NextMarker element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` + + metadataListHealthChecksOutput `json:"-", xml:"-"` +} + +type metadataListHealthChecksOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the 2013-04-01/hostedzonesbyname resource. The response to this +// request includes a HostedZones element with zero or more HostedZone child +// elements lexicographically ordered by DNS name. By default, the list of hosted +// zones is displayed on a single page. You can control the length of the page +// that is displayed by using the MaxItems parameter. You can use the DNSName +// and HostedZoneId parameters to control the hosted zone that the list begins +// with. +// +// For more information about listing hosted zones, see Listing the Hosted +// Zones for an AWS Account (http://docs.amazonwebservices.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) +// in the Amazon Route 53 Developer Guide. +type ListHostedZonesByNameInput struct { + // The first name in the lexicographic ordering of domain names that you want + // the ListHostedZonesByNameRequest request to list. + // + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. 
+ DNSName *string `location:"querystring" locationName:"dnsname" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + HostedZoneID *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + metadataListHostedZonesByNameInput `json:"-", xml:"-"` +} + +type metadataListHostedZonesByNameInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the response for the request. +type ListHostedZonesByNameOutput struct { + // The DNSName value sent in the request. + DNSName *string `type:"string"` + + // The HostedZoneId value sent in the request. + HostedZoneID *string `locationName:"HostedZoneId" type:"string"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the NextDNSName and NextHostedZoneId elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesByNameResponse$IsTruncated in the response is + // true. 
Call ListHostedZonesByName again and specify the value of ListHostedZonesByNameResponse$NextDNSName + // and ListHostedZonesByNameResponse$NextHostedZoneId elements respectively + // to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextDNSName *string `type:"string"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextHostedZoneID *string `locationName:"NextHostedZoneId" type:"string"` + + metadataListHostedZonesByNameOutput `json:"-", xml:"-"` +} + +type metadataListHostedZonesByNameOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone +// resource. The response to this request includes a HostedZones element with +// zero or more HostedZone child elements. By default, the list of hosted zones +// is displayed on a single page. You can control the length of the page that +// is displayed by using the MaxItems parameter. You can use the Marker parameter +// to control the hosted zone that the list begins with. 
For more information +// about listing hosted zones, see Listing the Hosted Zones for an AWS Account +// (http://docs.amazonwebservices.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// Route 53 returns a maximum of 100 items. If you set MaxItems to a value +// greater than 100, Route 53 returns only the first 100. +type ListHostedZonesInput struct { + DelegationSetID *string `location:"querystring" locationName:"delegationsetid" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + metadataListHostedZonesInput `json:"-", xml:"-"` +} + +type metadataListHostedZonesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the response for the request. +type ListHostedZonesOutput struct { + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. 
+ Marker *string `type:"string" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesResponse$IsTruncated in the response is true. + // Call ListHostedZones again and specify the value of ListHostedZonesResponse$NextMarker + // in the ListHostedZonesRequest$Marker element to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing hosted zones. If ListHostedZonesResponse$IsTruncated + // is true, make another request to ListHostedZones and include the value of + // the NextMarker element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` + + metadataListHostedZonesOutput `json:"-", xml:"-"` +} + +type metadataListHostedZonesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// The input for a ListResourceRecordSets request. +type ListResourceRecordSetsInput struct { + // The ID of the hosted zone that contains the resource record sets that you + // want to get. + HostedZoneID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of records you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, specify the value of ListResourceRecordSetsResponse$NextRecordIdentifier + // from the previous response to get the next resource record set that has the + // current DNS name and type. + StartRecordIdentifier *string `location:"querystring" locationName:"identifier" type:"string"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListResourceRecordSets request to list. 
+ StartRecordName *string `location:"querystring" locationName:"name" type:"string"` + + // The DNS type at which to begin the listing of resource record sets. + // + // Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT + // + // Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Alias Resource Record Sets: A | AAAA + // + // Constraint: Specifying type without specifying name returns an InvalidInput + // error. + StartRecordType *string `location:"querystring" locationName:"type" type:"string"` + + metadataListResourceRecordSetsInput `json:"-", xml:"-"` +} + +type metadataListResourceRecordSetsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the resource record sets that +// are returned by the request and information about the response. +type ListResourceRecordSetsOutput struct { + // A flag that indicates whether there are more resource record sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the ListResourceRecordSetsResponse$NextRecordName + // element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, the value of SetIdentifier for the next resource record + // set that has the current DNS name and type. + NextRecordIdentifier *string `type:"string"` + + // If the results were truncated, the name of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. 
+ NextRecordName *string `type:"string"` + + // If the results were truncated, the type of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. + NextRecordType *string `type:"string"` + + // A complex type that contains information about the resource record sets that + // are returned by the request. + ResourceRecordSets []*ResourceRecordSet `locationNameList:"ResourceRecordSet" type:"list" required:"true"` + + metadataListResourceRecordSetsOutput `json:"-", xml:"-"` +} + +type metadataListResourceRecordSetsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the 2013-04-01/delegationset resource. The response to this request includes +// a DelegationSets element with zero or more DelegationSet child elements. +// By default, the list of reusable delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Route 53 returns a maximum of 100 items. If you set MaxItems to a value +// greater than 100, Route 53 returns only the first 100. +type ListReusableDelegationSetsInput struct { + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of reusable delegation sets to return per page + // of results. 
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+
+ metadataListReusableDelegationSetsInput `json:"-" xml:"-"`
+}
+
+type metadataListReusableDelegationSetsInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// A complex type that contains the response for the request.
+type ListReusableDelegationSetsOutput struct {
+ // A complex type that contains information about the reusable delegation sets
+ // associated with the current AWS account.
+ DelegationSets []*DelegationSet `locationNameList:"DelegationSet" type:"list" required:"true"`
+
+ // A flag indicating whether there are more reusable delegation sets to be listed.
+ // If your results were truncated, you can make a follow-up request for the
+ // next page of results by using the Marker element.
+ //
+ // Valid Values: true | false
+ IsTruncated *bool `type:"boolean" required:"true"`
+
+ // If the request returned more than one page of results, submit another request
+ // and specify the value of NextMarker from the last response in the marker
+ // parameter to get the next page of results.
+ Marker *string `type:"string" required:"true"`
+
+ // The maximum number of reusable delegation sets to be included in the response
+ // body. If the number of reusable delegation sets associated with this AWS
+ // account exceeds MaxItems, the value of ListReusableDelegationSetsResponse$IsTruncated
+ // in the response is true. Call ListReusableDelegationSets again and specify
+ // the value of ListReusableDelegationSetsResponse$NextMarker in the ListReusableDelegationSetsRequest$Marker
+ // element to get the next page of results.
+ MaxItems *string `type:"string" required:"true"`
+
+ // Indicates where to continue listing reusable delegation sets. If ListReusableDelegationSetsResponse$IsTruncated
+ // is true, make another request to ListReusableDelegationSets and include the
+ // value of the NextMarker element in the Marker element to get the next page
+ // of results.
+ NextMarker *string `type:"string"`
+
+ metadataListReusableDelegationSetsOutput `json:"-" xml:"-"`
+}
+
+type metadataListReusableDelegationSetsOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// A complex type containing information about a request for a list of the tags
+// that are associated with an individual resource.
+type ListTagsForResourceInput struct {
+ // The ID of the resource for which you want to retrieve tags.
+ ResourceID *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"`
+
+ // The type of the resource.
+ //
+ // - The resource type for health checks is healthcheck.
+ //
+ // - The resource type for hosted zones is hostedzone.
+ ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true"`
+
+ metadataListTagsForResourceInput `json:"-" xml:"-"`
+}
+
+type metadataListTagsForResourceInput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// A complex type containing tags for the specified resource.
+type ListTagsForResourceOutput struct {
+ // A ResourceTagSet containing tags associated with the specified resource.
+ ResourceTagSet *ResourceTagSet `type:"structure" required:"true"`
+
+ metadataListTagsForResourceOutput `json:"-" xml:"-"`
+}
+
+type metadataListTagsForResourceOutput struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// A complex type containing information about a request for a list of the tags
+// that are associated with up to 10 specified resources.
+type ListTagsForResourcesInput struct {
+ // A complex type that contains the ResourceId element for each resource for
+ // which you want to get a list of tags.
+ ResourceIDs []*string `locationName:"ResourceIds" locationNameList:"ResourceId" type:"list" required:"true"`
+
+ // The type of the resources.
+ //
+ // - The resource type for health checks is healthcheck.
+ //
+ // - The resource type for hosted zones is hostedzone.
+ ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true"` + + metadataListTagsForResourcesInput `json:"-", xml:"-"` +} + +type metadataListTagsForResourcesInput struct { + SDKShapeTraits bool `locationName:"ListTagsForResourcesRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing tags for the specified resources. +type ListTagsForResourcesOutput struct { + // A list of ResourceTagSets containing tags associated with the specified resources. + ResourceTagSets []*ResourceTagSet `locationNameList:"ResourceTagSet" type:"list" required:"true"` + + metadataListTagsForResourcesOutput `json:"-", xml:"-"` +} + +type metadataListTagsForResourcesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains the value of the Value element for the current +// resource record set. +type ResourceRecord struct { + // The value of the Value element for the current resource record set. + Value *string `type:"string" required:"true"` + + metadataResourceRecord `json:"-", xml:"-"` +} + +type metadataResourceRecord struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the current resource record +// set. +type ResourceRecordSet struct { + // Alias resource record sets only: Information about the AWS resource to which + // you are redirecting traffic. + AliasTarget *AliasTarget `type:"structure"` + + // Failover resource record sets only: Among resource record sets that have + // the same combination of DNS name and type, a value that indicates whether + // the current resource record set is a primary or secondary resource record + // set. A failover set may contain at most one resource record set marked as + // primary and one resource record set marked as secondary. 
A resource record + // set marked as primary will be returned if any of the following are true: + // (1) an associated health check is passing, (2) if the resource record set + // is an alias with the evaluate target health and at least one target resource + // record set is healthy, (3) both the primary and secondary resource record + // set are failing health checks or (4) there is no secondary resource record + // set. A secondary resource record set will be returned if: (1) the primary + // is failing a health check and either the secondary is passing a health check + // or has no associated health check, or (2) there is no primary resource record + // set. + // + // Valid values: PRIMARY | SECONDARY + Failover *string `type:"string"` + + // Geo location resource record sets only: Among resource record sets that have + // the same combination of DNS name and type, a value that specifies the geo + // location for the current resource record set. + GeoLocation *GeoLocation `type:"structure"` + + // Health Check resource record sets only, not required for alias resource record + // sets: An identifier that is used to identify health check associated with + // the resource record set. + HealthCheckID *string `locationName:"HealthCheckId" type:"string"` + + // The domain name of the current resource record set. + Name *string `type:"string" required:"true"` + + // Latency-based resource record sets only: Among resource record sets that + // have the same combination of DNS name and type, a value that specifies the + // AWS region for the current resource record set. + Region *string `type:"string"` + + // A complex type that contains the resource records for the current resource + // record set. 
+ ResourceRecords []*ResourceRecord `locationNameList:"ResourceRecord" type:"list"` + + // Weighted, Latency, Geo, and Failover resource record sets only: An identifier + // that differentiates among multiple resource record sets that have the same + // combination of DNS name and type. + SetIdentifier *string `type:"string"` + + // The cache time to live for the current resource record set. + TTL *int64 `type:"long"` + + // The type of the current resource record set. + Type *string `type:"string" required:"true"` + + // Weighted resource record sets only: Among resource record sets that have + // the same combination of DNS name and type, a value that determines what portion + // of traffic for the current resource record set is routed to the associated + // location. + Weight *int64 `type:"long"` + + metadataResourceRecordSet `json:"-", xml:"-"` +} + +type metadataResourceRecordSet struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type containing a resource and its associated tags. +type ResourceTagSet struct { + // The ID for the specified resource. + ResourceID *string `locationName:"ResourceId" type:"string"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `type:"string"` + + // The tags associated with the specified resource. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + metadataResourceTagSet `json:"-", xml:"-"` +} + +type metadataResourceTagSet struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the health check status for +// the current observation. +type StatusReport struct { + // The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). 
+ // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC), which is synonymous with Greenwich Mean Time in this context. + CheckedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The observed health check status. + Status *string `type:"string"` + + metadataStatusReport `json:"-", xml:"-"` +} + +type metadataStatusReport struct { + SDKShapeTraits bool `type:"structure"` +} + +// A single tag containing a key and value. +type Tag struct { + // The key for a Tag. + Key *string `type:"string"` + + // The value for a Tag. + Value *string `type:"string"` + + metadataTag `json:"-", xml:"-"` +} + +type metadataTag struct { + SDKShapeTraits bool `type:"structure"` +} + +// >A complex type that contains information about the request to update a health +// check. +type UpdateHealthCheckInput struct { + // The number of consecutive health checks that an endpoint must pass or fail + // for Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + // + // Specify this value only if you want to change it. + FailureThreshold *int64 `type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + // + // Specify this value only if you want to change it. + FullyQualifiedDomainName *string `type:"string"` + + // The ID of the health check to update. + HealthCheckID *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + // Optional. When you specify a health check version, Route 53 compares this + // value with the current value in the health check, which prevents you from + // updating the health check when the versions don't match. Using HealthCheckVersion + // lets you prevent overwriting another change to the health check. 
+ HealthCheckVersion *int64 `type:"long"` + + // The IP address of the resource that you want to check. + // + // Specify this value only if you want to change it. + IPAddress *string `type:"string"` + + // The port on which you want Route 53 to open a connection to perform health + // checks. + // + // Specify this value only if you want to change it. + Port *int64 `type:"integer"` + + // The path that you want Amazon Route 53 to request when performing health + // checks. The path can be any value for which your endpoint will return an + // HTTP status code of 2xx or 3xx when the endpoint is healthy, for example + // the file /docs/route53-health-check.html. + // + // Specify this value only if you want to change it. + ResourcePath *string `type:"string"` + + // If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string that + // you want Route 53 to search for in the response body from the specified resource. + // If the string appears in the response body, Route 53 considers the resource + // healthy. + // + // Specify this value only if you want to change it. + SearchString *string `type:"string"` + + metadataUpdateHealthCheckInput `json:"-", xml:"-"` +} + +type metadataUpdateHealthCheckInput struct { + SDKShapeTraits bool `locationName:"UpdateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +type UpdateHealthCheckOutput struct { + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + metadataUpdateHealthCheckOutput `json:"-", xml:"-"` +} + +type metadataUpdateHealthCheckOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// A complex type that contains information about the request to update a hosted +// zone comment. +type UpdateHostedZoneCommentInput struct { + // A comment about your hosted zone. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to update. 
+ ID *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + metadataUpdateHostedZoneCommentInput `json:"-", xml:"-"` +} + +type metadataUpdateHostedZoneCommentInput struct { + SDKShapeTraits bool `locationName:"UpdateHostedZoneCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` +} + +// A complex type containing information about the specified hosted zone after +// the update. +type UpdateHostedZoneCommentOutput struct { + // A complex type that contain information about the specified hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + metadataUpdateHostedZoneCommentOutput `json:"-", xml:"-"` +} + +type metadataUpdateHostedZoneCommentOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type VPC struct { + // A VPC ID + VPCID *string `locationName:"VPCId" type:"string"` + + VPCRegion *string `type:"string"` + + metadataVPC `json:"-", xml:"-"` +} + +type metadataVPC struct { + SDKShapeTraits bool `type:"structure"` +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations.go new file mode 100644 index 000000000..341e20d81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations.go @@ -0,0 +1,20 @@ +package route53 + +import ( + "regexp" + + "github.com/awslabs/aws-sdk-go/aws" +) + +func init() { + initService = func(s *aws.Service) { + s.Handlers.Build.PushBack(sanitizeURL) + } +} + +var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`) + +func sanitizeURL(r *aws.Request) { + r.HTTPRequest.URL.Opaque = + reSanitizeURL.ReplaceAllString(r.HTTPRequest.URL.Opaque, "/") +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations_test.go new file mode 100644 index 
000000000..f0fe7e954 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/customizations_test.go @@ -0,0 +1,20 @@ +package route53_test + +import ( + "testing" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/util/utilassert" + "github.com/awslabs/aws-sdk-go/service/route53" +) + +func TestBuildCorrectURI(t *testing.T) { + svc := route53.New(nil) + req, _ := svc.GetHostedZoneRequest(&route53.GetHostedZoneInput{ + ID: aws.String("/hostedzone/ABCDEFG"), + }) + + req.Build() + + utilassert.Match(t, `\/hostedzone\/ABCDEFG$`, req.HTTPRequest.URL.String()) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/examples_test.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/examples_test.go new file mode 100644 index 000000000..5584c5d6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/examples_test.go @@ -0,0 +1,714 @@ +package route53_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/aws/awsutil" + "github.com/awslabs/aws-sdk-go/service/route53" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRoute53_AssociateVPCWithHostedZone() { + svc := route53.New(nil) + + params := &route53.AssociateVPCWithHostedZoneInput{ + HostedZoneID: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCID: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("AssociateVPCComment"), + } + resp, err := svc.AssociateVPCWithHostedZone(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ChangeResourceRecordSets() { + svc := route53.New(nil) + + params := &route53.ChangeResourceRecordSetsInput{ + ChangeBatch: &route53.ChangeBatch{ // Required + Changes: []*route53.Change{ // Required + &route53.Change{ // Required + Action: aws.String("ChangeAction"), // Required + ResourceRecordSet: &route53.ResourceRecordSet{ // Required + Name: aws.String("DNSName"), // Required + Type: aws.String("RRType"), // Required + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String("DNSName"), // Required + EvaluateTargetHealth: aws.Boolean(true), // Required + HostedZoneID: aws.String("ResourceId"), // Required + }, + Failover: aws.String("ResourceRecordSetFailover"), + GeoLocation: &route53.GeoLocation{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + }, + HealthCheckID: aws.String("HealthCheckId"), + Region: aws.String("ResourceRecordSetRegion"), + ResourceRecords: []*route53.ResourceRecord{ + &route53.ResourceRecord{ // Required + Value: aws.String("RData"), // Required + }, + // More values... + }, + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + TTL: aws.Long(1), + Weight: aws.Long(1), + }, + }, + // More values... + }, + Comment: aws.String("ResourceDescription"), + }, + HostedZoneID: aws.String("ResourceId"), // Required + } + resp, err := svc.ChangeResourceRecordSets(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ChangeTagsForResource() { + svc := route53.New(nil) + + params := &route53.ChangeTagsForResourceInput{ + ResourceID: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + AddTags: []*route53.Tag{ + &route53.Tag{ // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + RemoveTagKeys: []*string{ + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.ChangeTagsForResource(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_CreateHealthCheck() { + svc := route53.New(nil) + + params := &route53.CreateHealthCheckInput{ + CallerReference: aws.String("HealthCheckNonce"), // Required + HealthCheckConfig: &route53.HealthCheckConfig{ // Required + Type: aws.String("HealthCheckType"), // Required + FailureThreshold: aws.Long(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + IPAddress: aws.String("IPAddress"), + Port: aws.Long(1), + RequestInterval: aws.Long(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + }, + } + resp, err := svc.CreateHealthCheck(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_CreateHostedZone() { + svc := route53.New(nil) + + params := &route53.CreateHostedZoneInput{ + CallerReference: aws.String("Nonce"), // Required + Name: aws.String("DNSName"), // Required + DelegationSetID: aws.String("ResourceId"), + HostedZoneConfig: &route53.HostedZoneConfig{ + Comment: aws.String("ResourceDescription"), + PrivateZone: aws.Boolean(true), + }, + VPC: &route53.VPC{ + VPCID: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + } + resp, err := svc.CreateHostedZone(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_CreateReusableDelegationSet() { + svc := route53.New(nil) + + params := &route53.CreateReusableDelegationSetInput{ + CallerReference: aws.String("Nonce"), // Required + HostedZoneID: aws.String("ResourceId"), + } + resp, err := svc.CreateReusableDelegationSet(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_DeleteHealthCheck() { + svc := route53.New(nil) + + params := &route53.DeleteHealthCheckInput{ + HealthCheckID: aws.String("HealthCheckId"), // Required + } + resp, err := svc.DeleteHealthCheck(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_DeleteHostedZone() { + svc := route53.New(nil) + + params := &route53.DeleteHostedZoneInput{ + ID: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteHostedZone(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_DeleteReusableDelegationSet() { + svc := route53.New(nil) + + params := &route53.DeleteReusableDelegationSetInput{ + ID: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteReusableDelegationSet(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_DisassociateVPCFromHostedZone() { + svc := route53.New(nil) + + params := &route53.DisassociateVPCFromHostedZoneInput{ + HostedZoneID: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCID: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("DisassociateVPCComment"), + } + resp, err := svc.DisassociateVPCFromHostedZone(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetChange() { + svc := route53.New(nil) + + params := &route53.GetChangeInput{ + ID: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChange(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetCheckerIPRanges() { + svc := route53.New(nil) + + var params *route53.GetCheckerIPRangesInput + resp, err := svc.GetCheckerIPRanges(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetGeoLocation() { + svc := route53.New(nil) + + params := &route53.GetGeoLocationInput{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.GetGeoLocation(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetHealthCheck() { + svc := route53.New(nil) + + params := &route53.GetHealthCheckInput{ + HealthCheckID: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheck(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. 
+ fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetHealthCheckCount() { + svc := route53.New(nil) + + var params *route53.GetHealthCheckCountInput + resp, err := svc.GetHealthCheckCount(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetHealthCheckLastFailureReason() { + svc := route53.New(nil) + + params := &route53.GetHealthCheckLastFailureReasonInput{ + HealthCheckID: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckLastFailureReason(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetHealthCheckStatus() { + svc := route53.New(nil) + + params := &route53.GetHealthCheckStatusInput{ + HealthCheckID: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckStatus(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetHostedZone() { + svc := route53.New(nil) + + params := &route53.GetHostedZoneInput{ + ID: aws.String("ResourceId"), // Required + } + resp, err := svc.GetHostedZone(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetHostedZoneCount() { + svc := route53.New(nil) + + var params *route53.GetHostedZoneCountInput + resp, err := svc.GetHostedZoneCount(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_GetReusableDelegationSet() { + svc := route53.New(nil) + + params := &route53.GetReusableDelegationSetInput{ + ID: aws.String("ResourceId"), // Required + } + resp, err := svc.GetReusableDelegationSet(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListGeoLocations() { + svc := route53.New(nil) + + params := &route53.ListGeoLocationsInput{ + MaxItems: aws.String("PageMaxItems"), + StartContinentCode: aws.String("GeoLocationContinentCode"), + StartCountryCode: aws.String("GeoLocationCountryCode"), + StartSubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.ListGeoLocations(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. 
+ fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListHealthChecks() { + svc := route53.New(nil) + + params := &route53.ListHealthChecksInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHealthChecks(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListHostedZones() { + svc := route53.New(nil) + + params := &route53.ListHostedZonesInput{ + DelegationSetID: aws.String("ResourceId"), + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZones(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListHostedZonesByName() { + svc := route53.New(nil) + + params := &route53.ListHostedZonesByNameInput{ + DNSName: aws.String("DNSName"), + HostedZoneID: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZonesByName(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListResourceRecordSets() { + svc := route53.New(nil) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneID: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + StartRecordIdentifier: aws.String("ResourceRecordSetIdentifier"), + StartRecordName: aws.String("DNSName"), + StartRecordType: aws.String("RRType"), + } + resp, err := svc.ListResourceRecordSets(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListReusableDelegationSets() { + svc := route53.New(nil) + + params := &route53.ListReusableDelegationSetsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListReusableDelegationSets(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListTagsForResource() { + svc := route53.New(nil) + + params := &route53.ListTagsForResourceInput{ + ResourceID: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. 
+ fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_ListTagsForResources() { + svc := route53.New(nil) + + params := &route53.ListTagsForResourcesInput{ + ResourceIDs: []*string{ // Required + aws.String("TagResourceId"), // Required + // More values... + }, + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResources(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_UpdateHealthCheck() { + svc := route53.New(nil) + + params := &route53.UpdateHealthCheckInput{ + HealthCheckID: aws.String("HealthCheckId"), // Required + FailureThreshold: aws.Long(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthCheckVersion: aws.Long(1), + IPAddress: aws.String("IPAddress"), + Port: aws.Long(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + } + resp, err := svc.UpdateHealthCheck(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. + panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} + +func ExampleRoute53_UpdateHostedZoneComment() { + svc := route53.New(nil) + + params := &route53.UpdateHostedZoneCommentInput{ + ID: aws.String("ResourceId"), // Required + Comment: aws.String("ResourceDescription"), + } + resp, err := svc.UpdateHostedZoneComment(params) + + if awserr := aws.Error(err); awserr != nil { + // A service error occurred. + fmt.Println("Error:", awserr.Code, awserr.Message) + } else if err != nil { + // A non-service error occurred. 
+ panic(err) + } + + // Pretty-print the response data. + fmt.Println(awsutil.StringValue(resp)) +} diff --git a/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/service.go b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/service.go new file mode 100644 index 000000000..a2f792798 --- /dev/null +++ b/Godeps/_workspace/src/github.com/awslabs/aws-sdk-go/service/route53/service.go @@ -0,0 +1,59 @@ +package route53 + +import ( + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/internal/protocol/restxml" + "github.com/awslabs/aws-sdk-go/internal/signer/v4" +) + +// Route53 is a client for Route 53. +type Route53 struct { + *aws.Service +} + +// Used for custom service initialization logic +var initService func(*aws.Service) + +// Used for custom request initialization logic +var initRequest func(*aws.Request) + +// New returns a new Route53 client. +func New(config *aws.Config) *Route53 { + if config == nil { + config = &aws.Config{} + } + + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "route53", + APIVersion: "2013-04-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(restxml.Build) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + // Run custom service initialization if present + if initService != nil { + initService(service) + } + + return &Route53{service} +} + +// newRequest creates a new request for a Route53 operation and runs any +// custom request initialization. 
+func (c *Route53) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/braintree/manners/LICENSE b/Godeps/_workspace/src/github.com/braintree/manners/LICENSE new file mode 100644 index 000000000..91ef5beed --- /dev/null +++ b/Godeps/_workspace/src/github.com/braintree/manners/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Braintree, a division of PayPal, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/braintree/manners/helper_test.go b/Godeps/_workspace/src/github.com/braintree/manners/helper_test.go new file mode 100644 index 000000000..ea721a180 --- /dev/null +++ b/Godeps/_workspace/src/github.com/braintree/manners/helper_test.go @@ -0,0 +1,34 @@ +package manners + +import ( + "net/http" + "time" +) + +// A response handler that blocks until it receives a signal; simulates an +// arbitrarily long web request. The "ready" channel is to prevent a race +// condition in the test where the test moves on before the server is ready +// to handle the request. +func newBlockingHandler(ready, done chan bool) *blockingHandler { + return &blockingHandler{ready, done} +} + +type blockingHandler struct { + ready chan bool + done chan bool +} + +func (h *blockingHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + h.ready <- true + time.Sleep(1e2) + h.done <- true +} + +// A response handler that does nothing. +func newTestHandler() testHandler { + return testHandler{} +} + +type testHandler struct{} + +func (h testHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {} diff --git a/Godeps/_workspace/src/github.com/braintree/manners/listener.go b/Godeps/_workspace/src/github.com/braintree/manners/listener.go new file mode 100644 index 000000000..dd84e4a2e --- /dev/null +++ b/Godeps/_workspace/src/github.com/braintree/manners/listener.go @@ -0,0 +1,49 @@ +package manners + +import ( + "net" + "sync" +) + +func NewListener(l net.Listener, s *GracefulServer) *GracefulListener { + return &GracefulListener{l, true, s, sync.RWMutex{}} +} + +// A GracefulListener differs from a standard net.Listener in one way: if +// Accept() is called after it is gracefully closed, it returns a +// listenerAlreadyClosed error. The GracefulServer will ignore this +// error. 
+type GracefulListener struct { + net.Listener + open bool + server *GracefulServer + rw sync.RWMutex +} + +func (l *GracefulListener) Accept() (net.Conn, error) { + conn, err := l.Listener.Accept() + if err != nil { + l.rw.RLock() + defer l.rw.RUnlock() + if !l.open { + err = listenerAlreadyClosed{err} + } + return nil, err + } + return conn, nil +} + +func (l *GracefulListener) Close() error { + l.rw.Lock() + defer l.rw.Unlock() + if !l.open { + return nil + } + l.open = false + err := l.Listener.Close() + return err +} + +type listenerAlreadyClosed struct { + error +} diff --git a/Godeps/_workspace/src/github.com/braintree/manners/server.go b/Godeps/_workspace/src/github.com/braintree/manners/server.go new file mode 100644 index 000000000..a79246668 --- /dev/null +++ b/Godeps/_workspace/src/github.com/braintree/manners/server.go @@ -0,0 +1,83 @@ +package manners + +import ( + "net" + "net/http" + "sync" +) + +// Creates a new GracefulServer. The server will begin shutting down when +// a value is passed to the Shutdown channel. +func NewServer() *GracefulServer { + return &GracefulServer{ + Shutdown: make(chan bool), + } +} + +// A GracefulServer maintains a WaitGroup that counts how many in-flight +// requests the server is handling. When it receives a shutdown signal, +// it stops accepting new requests but does not actually shut down until +// all in-flight requests terminate. +type GracefulServer struct { + Shutdown chan bool + wg sync.WaitGroup + shutdownHandler func() + InnerServer http.Server +} + +// A helper function that emulates the functionality of http.ListenAndServe. +func (s *GracefulServer) ListenAndServe(addr string, handler http.Handler) error { + oldListener, err := net.Listen("tcp", addr) + if err != nil { + return err + } + + listener := NewListener(oldListener, s) + err = s.Serve(listener, handler) + return err +} + +// Similar to http.Serve. The listener passed must wrap a GracefulListener. 
+func (s *GracefulServer) Serve(listener net.Listener, handler http.Handler) error { + s.shutdownHandler = func() { listener.Close() } + s.listenForShutdown() + s.InnerServer.Handler = handler + s.InnerServer.ConnState = func(conn net.Conn, newState http.ConnState) { + switch newState { + case http.StateNew: + s.StartRoutine() + case http.StateClosed, http.StateHijacked: + s.FinishRoutine() + } + } + err := s.InnerServer.Serve(listener) + + // This block is reached when the server has received a shut down command. + if err == nil { + s.wg.Wait() + return nil + } else if _, ok := err.(listenerAlreadyClosed); ok { + s.wg.Wait() + return nil + } + return err +} + +// Increments the server's WaitGroup. Use this if a web request starts more +// goroutines and these goroutines are not guaranteed to finish before the +// request. +func (s *GracefulServer) StartRoutine() { + s.wg.Add(1) +} + +// Decrement the server's WaitGroup. Used this to complement StartRoutine(). +func (s *GracefulServer) FinishRoutine() { + s.wg.Done() +} + +func (s *GracefulServer) listenForShutdown() { + go func() { + <-s.Shutdown + s.shutdownHandler() + }() +} diff --git a/Godeps/_workspace/src/github.com/braintree/manners/server_test.go b/Godeps/_workspace/src/github.com/braintree/manners/server_test.go new file mode 100644 index 000000000..0da015566 --- /dev/null +++ b/Godeps/_workspace/src/github.com/braintree/manners/server_test.go @@ -0,0 +1,71 @@ +package manners + +import ( + "net/http" + "testing" +) + +// Tests that the server allows in-flight requests to complete before shutting +// down. 
+func TestGracefulness(t *testing.T) { + ready := make(chan bool) + done := make(chan bool) + + exited := false + + handler := newBlockingHandler(ready, done) + server := NewServer() + + go func() { + err := server.ListenAndServe(":7000", handler) + if err != nil { + t.Error(err) + } + + exited = true + }() + + go func() { + _, err := http.Get("http://localhost:7000") + if err != nil { + t.Error(err) + } + }() + + // This will block until the server is inside the handler function. + <-ready + + server.Shutdown <- true + <-done + + if exited { + t.Fatal("The request did not complete before server exited") + } else { + // The handler is being allowed to run to completion; test passes. + } +} + +// Tests that the server begins to shut down when told to and does not accept +// new requests +func TestShutdown(t *testing.T) { + handler := newTestHandler() + server := NewServer() + exited := make(chan bool) + + go func() { + err := server.ListenAndServe(":7100", handler) + if err != nil { + t.Error(err) + } + exited <- true + }() + + server.Shutdown <- true + + <-exited + _, err := http.Get("http://localhost:7100") + + if err == nil { + t.Fatal("Did not receive an error when trying to connect to server.") + } +} diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/.gitignore b/Godeps/_workspace/src/github.com/go-gorp/gorp/.gitignore new file mode 100644 index 000000000..8a06adea5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/.gitignore @@ -0,0 +1,8 @@ +_test +_testmain.go +_obj +*~ +*.6 +6.out +gorptest.bin +tmp diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/.travis.yml b/Godeps/_workspace/src/github.com/go-gorp/gorp/.travis.yml new file mode 100644 index 000000000..6df5edf1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/.travis.yml @@ -0,0 +1,23 @@ +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - tip + +services: + - mysql + - postgres + - sqlite3 + +before_script: + - mysql -e "CREATE DATABASE gorptest;" + - mysql 
-u root -e "GRANT ALL ON gorptest.* TO gorptest@localhost IDENTIFIED BY 'gorptest'" + - psql -c "CREATE DATABASE gorptest;" -U postgres + - psql -c "CREATE USER "gorptest" WITH SUPERUSER PASSWORD 'gorptest';" -U postgres + - go get github.com/lib/pq + - go get github.com/mattn/go-sqlite3 + - go get github.com/ziutek/mymysql/godrv + - go get github.com/go-sql-driver/mysql + +script: ./test_all.sh diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/LICENSE b/Godeps/_workspace/src/github.com/go-gorp/gorp/LICENSE new file mode 100644 index 000000000..b661111d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2012 James Cooper + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/Makefile b/Godeps/_workspace/src/github.com/go-gorp/gorp/Makefile new file mode 100644 index 000000000..3a27ae194 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/Makefile @@ -0,0 +1,6 @@ +include $(GOROOT)/src/Make.inc + +TARG = github.com/go-gorp/gorp +GOFILES = gorp.go dialect.go + +include $(GOROOT)/src/Make.pkg \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/dialect.go b/Godeps/_workspace/src/github.com/go-gorp/gorp/dialect.go new file mode 100644 index 000000000..8277a965e --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/dialect.go @@ -0,0 +1,696 @@ +package gorp + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, + // or LONGBLOB depending on the maxsize + ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string + + // string to append to primary key column definitions + AutoIncrStr() string + + // string to bind autoincrement columns to. Empty string will + // remove reference to those columns in the INSERT statement. 
+ AutoIncrBindValue() string + + AutoIncrInsertSuffix(col *ColumnMap) string + + // string to append to "create table" statement for vendor specific + // table attributes + CreateTableSuffix() string + + // string to truncate tables + TruncateClause() string + + // bind variable string to use when forming SQL statements + // in many dbs it is "?", but Postgres appears to use $1 + // + // i is a zero based index of the bind variable in this statement + // + BindVar(i int) string + + // Handles quoting of a field name to ensure that it doesn't raise any + // SQL parsing exceptions by using a reserved word as a field name. + QuoteField(field string) string + + // Handles building up of a schema.database string that is compatible with + // the given dialect + // + // schema - The schema that lives in + // table - The table name + QuotedTableForQuery(schema string, table string) string + + // Existance clause for table creation / deletion + IfSchemaNotExists(command, schema string) string + IfTableExists(command, schema, table string) string + IfTableNotExists(command, schema, table string) string +} + +// IntegerAutoIncrInserter is implemented by dialects that can perform +// inserts with automatically incremented integer primary keys. If +// the dialect can handle automatic assignment of more than just +// integers, see TargetedAutoIncrInserter. +type IntegerAutoIncrInserter interface { + InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) +} + +// TargetedAutoIncrInserter is implemented by dialects that can +// perform automatic assignment of any primary key type (i.e. strings +// for uuids, integers for serials, etc). +type TargetedAutoIncrInserter interface { + // InsertAutoIncrToTarget runs an insert operation and assigns the + // automatically generated primary key directly to the passed in + // target. The target should be a pointer to the primary key + // field of the value being inserted. 
+ InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error +} + +func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + res, err := exec.Exec(insertSql, params...) + if err != nil { + return 0, err + } + return res.LastInsertId() +} + +/////////////////////////////////////////////////////// +// sqlite3 // +///////////// + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" 
+func (d SqliteDialect) BindVar(i int) string { + return "?" +} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// PostgreSQL // +//////////////// + +type PostgresDialect struct { + suffix string +} + +func (d PostgresDialect) QuerySuffix() string { return ";" } + +func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time", "NullTime": + return 
"timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d PostgresDialect) AutoIncrStr() string { + return "" +} + +func (d PostgresDialect) AutoIncrBindValue() string { + return "default" +} + +func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + col.ColumnName +} + +// Returns suffix +func (d PostgresDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d PostgresDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d PostgresDialect) BindVar(i int) string { + return fmt.Sprintf("$%d", i+1) +} + +func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.query(insertSql, params...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return fmt.Errorf("No serial value returned for insert: %s Encountered error: %s", insertSql, rows.Err()) + } + if err := rows.Scan(target); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("more than two serial value returned for insert: %s", insertSql) + } + return rows.Err() +} + +func (d PostgresDialect) QuoteField(f string) string { + return `"` + strings.ToLower(f) + `"` +} + +func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d PostgresDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// MySQL // +/////////// + +// Implementation of Dialect for MySQL databases. +type MySQLDialect struct { + + // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example + Engine string + + // Encoding is the character encoding to use for created tables + Encoding string +} + +func (d MySQLDialect) QuerySuffix() string { return ";" } + +func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "tinyint unsigned" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "smallint unsigned" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "int unsigned" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint unsigned" + case reflect.Float64, reflect.Float32: + return "double" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "mediumblob" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns auto_increment +func (d MySQLDialect) AutoIncrStr() string { + return "auto_increment" +} + +func (d MySQLDialect) 
AutoIncrBindValue() string { + return "null" +} + +func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns engine=%s charset=%s based on values stored on struct +func (d MySQLDialect) CreateTableSuffix() string { + if d.Engine == "" || d.Encoding == "" { + msg := "gorp - undefined" + + if d.Engine == "" { + msg += " MySQLDialect.Engine" + } + if d.Engine == "" && d.Encoding == "" { + msg += "," + } + if d.Encoding == "" { + msg += " MySQLDialect.Encoding" + } + msg += ". Check that your MySQLDialect was correctly initialized when declared." + panic(msg) + } + + return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) +} + +func (d MySQLDialect) TruncateClause() string { + return "truncate" +} + +// Returns "?" +func (d MySQLDialect) BindVar(i int) string { + return "?" +} + +func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d MySQLDialect) QuoteField(f string) string { + return "`" + f + "`" +} + +func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d MySQLDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d MySQLDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d MySQLDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// Sql Server // +//////////////// + +// Implementation of Dialect for Microsoft SQL Server databases. 
+// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb + +type SqlServerDialect struct { + suffix string +} + +func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "bit" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "smallint" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "int" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "bigint" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint" + case reflect.Float32: + return "real" + case reflect.Float64: + return "float(53)" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "varbinary" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "float(53)" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns auto_increment +func (d SqlServerDialect) AutoIncrStr() string { + return "identity(0,1)" +} + +// Empty string removes autoincrement columns from the INSERT statements. +func (d SqlServerDialect) AutoIncrBindValue() string { + return "" +} + +func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqlServerDialect) CreateTableSuffix() string { + + return d.suffix +} + +func (d SqlServerDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqlServerDialect) BindVar(i int) string { + return "?" +} + +func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) 
+} + +func (d SqlServerDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return table + } + return schema + "." + table +} + +func (d SqlServerDialect) QuerySuffix() string { return ";" } + +func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string { + s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command) + return s +} + +func (d SqlServerDialect) IfTableExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema) + } + s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command) + return s +} + +func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema) + } + s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command) + return s +} + +/////////////////////////////////////////////////////// +// Oracle // +/////////// + +// Implementation of Dialect for Oracle databases. 
+type OracleDialect struct{} + +func (d OracleDialect) QuerySuffix() string { return "" } + +func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "NullTime", "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d OracleDialect) AutoIncrStr() string { + return "" +} + +func (d OracleDialect) AutoIncrBindValue() string { + return "default" +} + +func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + col.ColumnName +} + +// Returns suffix +func (d OracleDialect) CreateTableSuffix() string { + return "" +} + +func (d OracleDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d OracleDialect) BindVar(i int) string { + return fmt.Sprintf(":%d", i+1) +} + +func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + rows, err := exec.query(insertSql, params...) 
+ if err != nil { + return 0, err + } + defer rows.Close() + + if rows.Next() { + var id int64 + err := rows.Scan(&id) + return id, err + } + + return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error()) +} + +func (d OracleDialect) QuoteField(f string) string { + return `"` + strings.ToUpper(f) + `"` +} + +func (d OracleDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d OracleDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d OracleDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d OracleDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/errors.go b/Godeps/_workspace/src/github.com/go-gorp/gorp/errors.go new file mode 100644 index 000000000..356d68475 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/errors.go @@ -0,0 +1,26 @@ +package gorp + +import ( + "fmt" +) + +// A non-fatal error, when a select query returns columns that do not exist +// as fields in the struct it is being mapped to +type NoFieldInTypeError struct { + TypeName string + MissingColNames []string +} + +func (err *NoFieldInTypeError) Error() string { + return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName) +} + +// returns true if the error is non-fatal (ie, we shouldn't immediately return) +func NonFatalError(err error) bool { + switch err.(type) { + case *NoFieldInTypeError: + return true + default: + return false + } +} diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/gorp.go b/Godeps/_workspace/src/github.com/go-gorp/gorp/gorp.go new file mode 100644 index 
000000000..4c91b6f78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/gorp.go @@ -0,0 +1,2178 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp +// +package gorp + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "reflect" + "regexp" + "strings" + "time" +) + +// Oracle String (empty string is null) +type OracleString struct { + sql.NullString +} + +// Scan implements the Scanner interface. +func (os *OracleString) Scan(value interface{}) error { + if value == nil { + os.String, os.Valid = "", false + return nil + } + os.Valid = true + return os.NullString.Scan(value) +} + +// Value implements the driver Valuer interface. +func (os OracleString) Value() (driver.Value, error) { + if !os.Valid || os.String == "" { + return nil, nil + } + return os.String, nil +} + +// A nullable Time value +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + switch t := value.(type) { + case time.Time: + nt.Time, nt.Valid = t, true + case []byte: + nt.Valid = false + for _, dtfmt := range []string{ + "2006-01-02 15:04:05.999999999", + "2006-01-02T15:04:05.999999999", + "2006-01-02 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02 15:04", + "2006-01-02T15:04", + "2006-01-02", + "2006-01-02 15:04:05-07:00", + } { + var err error + if nt.Time, err = time.Parse(dtfmt, string(t)); err == nil { + nt.Valid = true + break + } + } + } + return nil +} + +// Value implements the driver Valuer interface. 
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +var zeroVal reflect.Value +var versFieldConst = "[gorp_ver_field]" + +// OptimisticLockError is returned by Update() or Delete() if the +// struct being modified has a Version field and the value is not equal to +// the current value in the database +type OptimisticLockError struct { + // Table name where the lock error occurred + TableName string + + // Primary key values of the row being updated/deleted + Keys []interface{} + + // true if a row was found with those keys, indicating the + // LocalVersion is stale. false if no value was found with those + // keys, suggesting the row has been deleted since loaded, or + // was never inserted to begin with + RowExists bool + + // Version value on the struct passed to Update/Delete. This value is + // out of sync with the database. + LocalVersion int64 +} + +// Error returns a description of the cause of the lock error +func (e OptimisticLockError) Error() string { + if e.RowExists { + return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) + } + + return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) +} + +// The TypeConverter interface provides a way to map a value of one +// type to another type when persisting to, or loading from, a database. +// +// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, +// or serialize a struct member as a JSON blob. +type TypeConverter interface { + // ToDb converts val to another type. Called before INSERT/UPDATE operations + ToDb(val interface{}) (interface{}, error) + + // FromDb returns a CustomScanner appropriate for this type. This will be used + // to hold values returned from SELECT queries. 
+ // + // In particular the CustomScanner returned should implement a Binder + // function appropriate for the Go type you wish to convert the db value to + // + // If bool==false, then no custom scanner will be used for this field. + FromDb(target interface{}) (CustomScanner, bool) +} + +// CustomScanner binds a database column value to a Go type +type CustomScanner struct { + // After a row is scanned, Holder will contain the value from the database column. + // Initialize the CustomScanner with the concrete Go type you wish the database + // driver to scan the raw column into. + Holder interface{} + // Target typically holds a pointer to the target struct field to bind the Holder + // value to. + Target interface{} + // Binder is a custom function that converts the holder value to the target type + // and sets target accordingly. This function should return error if a problem + // occurs converting the holder to the target. + Binder func(holder interface{}, target interface{}) error +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. +// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + tables []*TableMap + logger GorpLogger + logPrefix string +} + +// TableMap represents a mapping between a Go struct and a database table +// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these +type TableMap struct { + // Name of database table. 
+ TableName string + SchemaName string + gotype reflect.Type + Columns []*ColumnMap + keys []*ColumnMap + uniqueTogether [][]string + version *ColumnMap + insertPlan bindPlan + updatePlan bindPlan + deletePlan bindPlan + getPlan bindPlan + dbmap *DbMap +} + +// ResetSql removes cached insert/update/select/delete SQL strings +// associated with this TableMap. Call this if you've modified +// any column names or the table name itself. +func (t *TableMap) ResetSql() { + t.insertPlan = bindPlan{} + t.updatePlan = bindPlan{} + t.deletePlan = bindPlan{} + t.getPlan = bindPlan{} +} + +// SetKeys lets you specify the fields on a struct that map to primary +// key columns on the table. If isAutoIncr is set, result.LastInsertId() +// will be used after INSERT to bind the generated id to the Go struct. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if isAutoIncr is true, and fieldNames length != 1 +// +func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { + if isAutoIncr && len(fieldNames) != 1 { + panic(fmt.Sprintf( + "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", + len(fieldNames))) + } + t.keys = make([]*ColumnMap, 0) + for _, name := range fieldNames { + colmap := t.ColMap(name) + colmap.isPK = true + colmap.isAutoIncr = isAutoIncr + t.keys = append(t.keys, colmap) + } + t.ResetSql() + + return t +} + +// SetUniqueTogether lets you specify uniqueness constraints across multiple +// columns on the table. Each call adds an additional constraint for the +// specified columns. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if fieldNames length < 2. 
+// +func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { + if len(fieldNames) < 2 { + panic(fmt.Sprintf( + "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) + } + + columns := make([]string, 0) + for _, name := range fieldNames { + columns = append(columns, name) + } + t.uniqueTogether = append(t.uniqueTogether, columns) + t.ResetSql() + + return t +} + +// ColMap returns the ColumnMap pointer matching the given struct field +// name. It panics if the struct does not contain a field matching this +// name. +func (t *TableMap) ColMap(field string) *ColumnMap { + col := colMapOrNil(t, field) + if col == nil { + e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s", + t.TableName, t.gotype.Name(), field) + + panic(e) + } + return col +} + +func colMapOrNil(t *TableMap, field string) *ColumnMap { + for _, col := range t.Columns { + if col.fieldName == field || col.ColumnName == field { + return col + } + } + return nil +} + +// SetVersionCol sets the column to use as the Version field. By default +// the "Version" field is used. Returns the column found, or panics +// if the struct does not contain a field matching this name. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. 
+func (t *TableMap) SetVersionCol(field string) *ColumnMap { + c := t.ColMap(field) + t.version = c + t.ResetSql() + return c +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (plan bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := t.insertPlan + if plan.query == "" { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == 
"") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + + x++ + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.insertPlan = plan + } + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value) (bindInstance, error) { + plan := t.updatePlan + if plan.query == "" { + + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + 
plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.updatePlan = plan + } + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := t.deletePlan + if plan.query == "" { + + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.deletePlan = plan + } + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() bindPlan { + plan := t.getPlan + if plan.query == "" { + + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + 
s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + t.getPlan = plan + } + + return plan +} + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. +type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. + // Not used elsewhere + Unique bool + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + // Not used elsewhere + MaxSize int + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. 
+func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. +func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} + +// Transaction represents a database transaction. +// Insert/Update/Delete/Get/Exec operations will be run in the context +// of that transaction. Transactions should be terminated with +// a call to Commit() or Rollback() +type Transaction struct { + dbmap *DbMap + tx *sql.Tx + closed bool +} + +// Executor exposes the sql.DB and sql.Tx Exec function so that it can be used +// on internal functions that convert named parameters for the Exec function. +type executor interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// SqlExecutor exposes gorp operations that can be run from Pre/Post +// hooks. This hides whether the current operation that triggered the +// hook is in a transaction. +// +// See the DbMap function docs for each of the functions below for more +// information. 
type SqlExecutor interface {
	Get(i interface{}, keys ...interface{}) (interface{}, error)
	Insert(list ...interface{}) error
	Update(list ...interface{}) (int64, error)
	Delete(list ...interface{}) (int64, error)
	Exec(query string, args ...interface{}) (sql.Result, error)
	Select(i interface{}, query string,
		args ...interface{}) ([]interface{}, error)
	SelectInt(query string, args ...interface{}) (int64, error)
	SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error)
	SelectFloat(query string, args ...interface{}) (float64, error)
	SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error)
	SelectStr(query string, args ...interface{}) (string, error)
	SelectNullStr(query string, args ...interface{}) (sql.NullString, error)
	SelectOne(holder interface{}, query string, args ...interface{}) error
	// unexported raw-query hooks used internally by the Select* helpers
	query(query string, args ...interface{}) (*sql.Rows, error)
	queryRow(query string, args ...interface{}) *sql.Row
}

// Compile-time check that DbMap and Transaction implement the SqlExecutor
// interface.
var _, _ SqlExecutor = &DbMap{}, &Transaction{}

// GorpLogger is the minimal logging interface required by TraceOn.
type GorpLogger interface {
	Printf(format string, v ...interface{})
}

// TraceOn turns on SQL statement logging for this DbMap. After this is
// called, all SQL statements will be sent to the logger. If prefix is
// a non-empty string, it will be written to the front of all logged
// strings, which can aid in filtering log lines.
//
// Use TraceOn if you want to spy on the SQL statements that gorp
// generates.
//
// Note that the base log.Logger type satisfies GorpLogger, but adapters can
// easily be written for other logging packages (e.g., the golang-sanctioned
// glog framework).
func (m *DbMap) TraceOn(prefix string, logger GorpLogger) {
	m.logger = logger
	if prefix == "" {
		m.logPrefix = prefix
	} else {
		// append a trailing space so the prefix is separated from the query
		m.logPrefix = fmt.Sprintf("%s ", prefix)
	}
}

// TraceOff turns off tracing. It is idempotent.
func (m *DbMap) TraceOff() {
	m.logger = nil
	m.logPrefix = ""
}

// AddTable registers the given interface type with gorp. The table name
// will be given the name of the TypeOf(i). You must call this function,
// or AddTableWithName, for any struct type you wish to persist with
// the given DbMap.
//
// This operation is idempotent. If i's type is already mapped, the
// existing *TableMap is returned
func (m *DbMap) AddTable(i interface{}) *TableMap {
	return m.AddTableWithName(i, "")
}

// AddTableWithName has the same behavior as AddTable, but sets
// table.TableName to name.
func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap {
	return m.AddTableWithNameAndSchema(i, "", name)
}

// AddTableWithNameAndSchema has the same behavior as AddTable, but sets
// table.TableName to name and table.SchemaName to schema.
func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap {
	t := reflect.TypeOf(i)
	if name == "" {
		name = t.Name()
	}

	// check if we have a table for this type already
	// if so, update the name and return the existing pointer
	for i := range m.tables {
		table := m.tables[i]
		if table.gotype == t {
			table.TableName = name
			return table
		}
	}

	tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m}
	tmap.Columns = m.readStructColumns(t)
	m.tables = append(m.tables, tmap)

	return tmap
}

// readStructColumns builds the ColumnMap slice for struct type t, honoring
// `db` tags, flattening embedded structs, and letting an outer field
// override an embedded field of the same name.
func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap) {
	n := t.NumField()
	for i := 0; i < n; i++ {
		f := t.Field(i)
		if f.Anonymous && f.Type.Kind() == reflect.Struct {
			// Recursively add nested fields in embedded structs.
			subcols := m.readStructColumns(f.Type)
			// Don't append nested fields that have the same field
			// name as an already-mapped field.
			for _, subcol := range subcols {
				shouldAppend := true
				for _, col := range cols {
					if !subcol.Transient && subcol.fieldName == col.fieldName {
						shouldAppend = false
						break
					}
				}
				if shouldAppend {
					cols = append(cols, subcol)
				}
			}
		} else {
			// Column name comes from the `db` tag, falling back to the
			// Go field name; a tag of "-" marks the column transient.
			columnName := f.Tag.Get("db")
			if columnName == "" {
				columnName = f.Name
			}
			gotype := f.Type
			if m.TypeConverter != nil {
				// Make a new pointer to a value of type gotype and
				// pass it to the TypeConverter's FromDb method to see
				// if a different type should be used for the column
				// type during table creation.
				value := reflect.New(gotype).Interface()
				scanner, useHolder := m.TypeConverter.FromDb(value)
				if useHolder {
					gotype = reflect.TypeOf(scanner.Holder)
				}
			}
			cm := &ColumnMap{
				ColumnName: columnName,
				Transient:  columnName == "-",
				fieldName:  f.Name,
				gotype:     gotype,
			}
			// Check for nested fields of the same field name and
			// override them.
			shouldAppend := true
			for index, col := range cols {
				if !col.Transient && col.fieldName == cm.fieldName {
					cols[index] = cm
					shouldAppend = false
					break
				}
			}
			if shouldAppend {
				cols = append(cols, cm)
			}
		}
	}
	return
}

// CreateTables iterates through TableMaps registered to this DbMap and
// executes "create table" statements against the database for each.
//
// This is particularly useful in unit tests where you want to create
// and destroy the schema automatically.
func (m *DbMap) CreateTables() error {
	return m.createTables(false)
}

// CreateTablesIfNotExists is similar to CreateTables, but starts
// each statement with "create table if not exists" so that existing
// tables do not raise errors
func (m *DbMap) CreateTablesIfNotExists() error {
	return m.createTables(true)
}

// createTables generates and executes one "create table" statement per
// registered TableMap (preceded by "create schema" when a schema name is
// set), stopping at the first error.
func (m *DbMap) createTables(ifNotExists bool) error {
	var err error
	for i := range m.tables {
		table := m.tables[i]

		s := bytes.Buffer{}

		if strings.TrimSpace(table.SchemaName) != "" {
			schemaCreate := "create schema"
			if ifNotExists {
				s.WriteString(m.Dialect.IfSchemaNotExists(schemaCreate, table.SchemaName))
			} else {
				s.WriteString(schemaCreate)
			}
			s.WriteString(fmt.Sprintf(" %s;", table.SchemaName))
		}

		tableCreate := "create table"
		if ifNotExists {
			s.WriteString(m.Dialect.IfTableNotExists(tableCreate, table.SchemaName, table.TableName))
		} else {
			s.WriteString(tableCreate)
		}
		s.WriteString(fmt.Sprintf(" %s (", m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))

		// x counts emitted (non-transient) columns so commas are written
		// only between column definitions.
		x := 0
		for _, col := range table.Columns {
			if !col.Transient {
				if x > 0 {
					s.WriteString(", ")
				}
				stype := m.Dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr)
				s.WriteString(fmt.Sprintf("%s %s", m.Dialect.QuoteField(col.ColumnName), stype))

				if col.isPK || col.isNotNull {
					s.WriteString(" not null")
				}
				// single-column keys are declared inline; composite keys
				// get a separate "primary key (...)" clause below
				if col.isPK && len(table.keys) == 1 {
					s.WriteString(" primary key")
				}
				if col.Unique {
					s.WriteString(" unique")
				}
				if col.isAutoIncr {
					s.WriteString(fmt.Sprintf(" %s", m.Dialect.AutoIncrStr()))
				}

				x++
			}
		}
		if len(table.keys) > 1 {
			s.WriteString(", primary key (")
			for x := range table.keys {
				if x > 0 {
					s.WriteString(", ")
				}
				s.WriteString(m.Dialect.QuoteField(table.keys[x].ColumnName))
			}
			s.WriteString(")")
		}
		if len(table.uniqueTogether) > 0 {
			for _, columns := range table.uniqueTogether {
				s.WriteString(", unique (")
				for i, column := range columns {
					if i > 0 {
						s.WriteString(", ")
					}
					s.WriteString(m.Dialect.QuoteField(column))
				}
				s.WriteString(")")
			}
		}
		s.WriteString(") ")
		s.WriteString(m.Dialect.CreateTableSuffix())
		s.WriteString(m.Dialect.QuerySuffix())
		_, err = m.Exec(s.String())
		if err != nil {
			// stop at the first failing table
			break
		}
	}
	return err
}

// DropTable drops an individual table. Will throw an error
// if the table does not exist.
func (m *DbMap) DropTable(table interface{}) error {
	t := reflect.TypeOf(table)
	return m.dropTable(t, false)
}

// DropTableIfExists drops an individual table. Will NOT throw an error
// if the table does not exist.
func (m *DbMap) DropTableIfExists(table interface{}) error {
	t := reflect.TypeOf(table)
	return m.dropTable(t, true)
}

// DropTables iterates through TableMaps registered to this DbMap and
// executes "drop table" statements against the database for each.
func (m *DbMap) DropTables() error {
	return m.dropTables(false)
}

// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to
// avoid errors for tables that do not exist.
func (m *DbMap) DropTablesIfExists() error {
	return m.dropTables(true)
}

// Goes through all the registered tables, dropping them one by one.
// If an error is encountered, then it is returned and the rest of
// the tables are not dropped.
func (m *DbMap) dropTables(addIfExists bool) (err error) {
	for _, table := range m.tables {
		err = m.dropTableImpl(table, addIfExists)
		if err != nil {
			return
		}
	}
	return err
}

// Implementation of dropping a single table.
+func (m *DbMap) dropTable(t reflect.Type, addIfExists bool) error { + table := tableOrNil(m, t) + if table == nil { + return errors.New(fmt.Sprintf("table %s was not registered!", table.TableName)) + } + + return m.dropTableImpl(table, addIfExists) +} + +func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) { + tableDrop := "drop table" + if ifExists { + tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName) + } + _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + return err +} + +// TruncateTables iterates through TableMaps registered to this DbMap and +// executes "truncate table" statements against the database for each, or in the case of +// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization +// (http://www.sqlite.org/lang_delete.html) +func (m *DbMap) TruncateTables() error { + var err error + for i := range m.tables { + table := m.tables[i] + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + return err +} + +// Insert runs a SQL INSERT statement for each element in list. List +// items must be pointers. +// +// Any interface whose TableMap has an auto-increment primary key will +// have its last insert id bound to the PK field on the struct. +// +// The hook functions PreInsert() and/or PostInsert() will be executed +// before/after the INSERT statement if the interface defines them. +// +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Insert(list ...interface{}) error { + return insert(m, m, list...) +} + +// Update runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. 
+// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Update(list ...interface{}) (int64, error) { + return update(m, m, list...) +} + +// Delete runs a SQL DELETE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreDelete() and/or PostDelete() will be executed +// before/after the DELETE statement if the interface defines them. +// +// Returns the number of rows deleted. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Delete(list ...interface{}) (int64, error) { + return delete(m, m, list...) +} + +// Get runs a SQL SELECT to fetch a single row from the table based on the +// primary key(s) +// +// i should be an empty value for the struct to load. keys should be +// the primary key value(s) for the row to load. If multiple keys +// exist on the table, the order should match the column order +// specified in SetKeys() when the table mapping was defined. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Returns a pointer to a struct that matches or nil if no row is found. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(m, m, i, keys...) +} + +// Select runs an arbitrary SQL query, binding the columns in the result +// to fields on the struct specified by i. args represent the bind +// parameters for the SQL statement. +// +// Column names on the SELECT statement should be aliased to the field names +// on the struct i. 
Returns an error if one or more columns in the result +// do not match. It is OK if fields on i are not part of the SQL +// statement. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Values are returned in one of two ways: +// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to +// matching rows of type i. +// 2. If i is a pointer to a slice, the results will be appended to that slice +// and nil returned. +// +// i does NOT need to be registered with AddTable() +func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(m, m, i, query, args...) +} + +// Exec runs an arbitrary SQL statement. args represent the bind parameters. +// This is equivalent to running: Exec() using database/sql +func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return exec(m, query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function +func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(m, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function +func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(m, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFlot function +func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(m, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function +func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(m, query, args...) 
+} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function +func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(m, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function +func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(m, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function +func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(m, m, holder, query, args...) +} + +// Begin starts a gorp Transaction +func (m *DbMap) Begin() (*Transaction, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, "begin;") + } + tx, err := m.Db.Begin() + if err != nil { + return nil, err + } + return &Transaction{m, tx, false}, nil +} + +// TableFor returns the *TableMap corresponding to the given Go Type +// If no table is mapped to that type an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) { + table := tableOrNil(m, t) + if table == nil { + return nil, errors.New(fmt.Sprintf("No table found for type: %v", t.Name())) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: No keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// Prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the returned statement. 
// This is equivalent to running: Prepare() using database/sql
func (m *DbMap) Prepare(query string) (*sql.Stmt, error) {
	if m.logger != nil {
		now := time.Now()
		defer m.trace(now, query, nil)
	}
	return m.Db.Prepare(query)
}

// tableOrNil returns the registered TableMap for type t, or nil if t was
// never registered with AddTable.
func tableOrNil(m *DbMap, t reflect.Type) *TableMap {
	for i := range m.tables {
		table := m.tables[i]
		if table.gotype == t {
			return table
		}
	}
	return nil
}

// tableForPointer resolves the TableMap and dereferenced value for a
// pointer argument; it errors if ptr is not a pointer or its element
// type is not a registered table.
func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) {
	ptrv := reflect.ValueOf(ptr)
	if ptrv.Kind() != reflect.Ptr {
		e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr,
			ptrv.Kind())
		return nil, reflect.Value{}, errors.New(e)
	}
	elem := ptrv.Elem()
	etype := reflect.TypeOf(elem.Interface())
	t, err := m.TableFor(etype, checkPK)
	if err != nil {
		return nil, reflect.Value{}, err
	}

	return t, elem, nil
}

// queryRow runs QueryRow on the underlying sql.DB, tracing when enabled.
func (m *DbMap) queryRow(query string, args ...interface{}) *sql.Row {
	if m.logger != nil {
		now := time.Now()
		defer m.trace(now, query, args...)
	}
	return m.Db.QueryRow(query, args...)
}

// query runs Query on the underlying sql.DB, tracing when enabled.
func (m *DbMap) query(query string, args ...interface{}) (*sql.Rows, error) {
	if m.logger != nil {
		now := time.Now()
		defer m.trace(now, query, args...)
	}
	return m.Db.Query(query, args...)
}

// trace logs the query, its rendered bind arguments, and the elapsed
// time since started, using the configured logger and prefix.
func (m *DbMap) trace(started time.Time, query string, args ...interface{}) {
	if m.logger != nil {
		var margs = argsString(args...)
		m.logger.Printf("%s%s [%s] (%v)", m.logPrefix, query, margs, (time.Now().Sub(started)))
	}
}

// argsString renders bind arguments as "1:v1 2:v2 ..." for trace output,
// unwrapping driver.Valuer values and quoting strings.
func argsString(args ...interface{}) string {
	var margs string
	for i, a := range args {
		var v interface{} = a
		if x, ok := v.(driver.Valuer); ok {
			y, err := x.Value()
			if err == nil {
				v = y
			}
		}
		switch v.(type) {
		case string:
			v = fmt.Sprintf("%q", v)
		default:
			v = fmt.Sprintf("%v", v)
		}
		margs += fmt.Sprintf("%d:%s", i+1, v)
		if i+1 < len(args) {
			margs += " "
		}
	}
	return margs
}

///////////////

// Insert has the same behavior as DbMap.Insert(), but runs in a transaction.
func (t *Transaction) Insert(list ...interface{}) error {
	return insert(t.dbmap, t, list...)
}

// Update has the same behavior as DbMap.Update(), but runs in a transaction.
func (t *Transaction) Update(list ...interface{}) (int64, error) {
	return update(t.dbmap, t, list...)
}

// Delete has the same behavior as DbMap.Delete(), but runs in a transaction.
func (t *Transaction) Delete(list ...interface{}) (int64, error) {
	return delete(t.dbmap, t, list...)
}

// Get has the same behavior as DbMap.Get(), but runs in a transaction.
func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) {
	return get(t.dbmap, t, i, keys...)
}

// Select has the same behavior as DbMap.Select(), but runs in a transaction.
func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) {
	return hookedselect(t.dbmap, t, i, query, args...)
}

// Exec has the same behavior as DbMap.Exec(), but runs in a transaction.
func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) {
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, args...)
	}
	return exec(t, query, args...)
}

// SelectInt is a convenience wrapper around the gorp.SelectInt function.
func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) {
	return SelectInt(t, query, args...)
}

// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function.
func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) {
	return SelectNullInt(t, query, args...)
}

// SelectFloat is a convenience wrapper around the gorp.SelectFloat function.
func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) {
	return SelectFloat(t, query, args...)
}

// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function.
func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) {
	return SelectNullFloat(t, query, args...)
}

// SelectStr is a convenience wrapper around the gorp.SelectStr function.
func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) {
	return SelectStr(t, query, args...)
}

// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function.
func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) {
	return SelectNullStr(t, query, args...)
}

// SelectOne is a convenience wrapper around the gorp.SelectOne function.
func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error {
	return SelectOne(t.dbmap, t, holder, query, args...)
}

// Commit commits the underlying database transaction.
// Calling Commit (or Rollback) a second time returns sql.ErrTxDone.
func (t *Transaction) Commit() error {
	if !t.closed {
		t.closed = true
		if t.dbmap.logger != nil {
			now := time.Now()
			defer t.dbmap.trace(now, "commit;")
		}
		return t.tx.Commit()
	}

	return sql.ErrTxDone
}

// Rollback rolls back the underlying database transaction.
// Calling Rollback (or Commit) a second time returns sql.ErrTxDone.
func (t *Transaction) Rollback() error {
	if !t.closed {
		t.closed = true
		if t.dbmap.logger != nil {
			now := time.Now()
			defer t.dbmap.trace(now, "rollback;")
		}
		return t.tx.Rollback()
	}

	return sql.ErrTxDone
}

// Savepoint creates a savepoint with the given name. The name is interpolated
// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is
// derived from user input.
func (t *Transaction) Savepoint(name string) error {
	query := "savepoint " + t.dbmap.Dialect.QuoteField(name)
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, nil)
	}
	_, err := t.tx.Exec(query)
	return err
}

// RollbackToSavepoint rolls back to the savepoint with the given name. The
// name is interpolated directly into the SQL SAVEPOINT statement, so you must
// sanitize it if it is derived from user input.
func (t *Transaction) RollbackToSavepoint(savepoint string) error {
	query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, nil)
	}
	_, err := t.tx.Exec(query)
	return err
}

// ReleaseSavepoint releases the savepoint with the given name. The name is
// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize
// it if it is derived from user input.
func (t *Transaction) ReleaseSavepoint(savepoint string) error {
	query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, nil)
	}
	_, err := t.tx.Exec(query)
	return err
}

// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction.
func (t *Transaction) Prepare(query string) (*sql.Stmt, error) {
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, nil)
	}
	return t.tx.Prepare(query)
}

// queryRow runs QueryRow on the underlying sql.Tx, tracing when enabled.
func (t *Transaction) queryRow(query string, args ...interface{}) *sql.Row {
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, args...)
	}
	return t.tx.QueryRow(query, args...)
}

// query runs Query on the underlying sql.Tx, tracing when enabled.
func (t *Transaction) query(query string, args ...interface{}) (*sql.Rows, error) {
	if t.dbmap.logger != nil {
		now := time.Now()
		defer t.dbmap.trace(now, query, args...)
	}
	return t.tx.Query(query, args...)
}

///////////////

// SelectInt executes the given query, which should be a SELECT statement for a single
// integer column, and returns the value of the first row returned. If no rows are
// found, zero is returned.
func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) {
	var h int64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return 0, err
	}
	return h, nil
}

// SelectNullInt executes the given query, which should be a SELECT statement for a single
// integer column, and returns the value of the first row returned. If no rows are
// found, the empty sql.NullInt64 value is returned.
func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) {
	var h sql.NullInt64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return h, err
	}
	return h, nil
}

// SelectFloat executes the given query, which should be a SELECT statement for a single
// float column, and returns the value of the first row returned. If no rows are
// found, zero is returned.
func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) {
	var h float64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return 0, err
	}
	return h, nil
}

// SelectNullFloat executes the given query, which should be a SELECT statement for a single
// float column, and returns the value of the first row returned. If no rows are
// found, the empty sql.NullFloat64 value is returned.
func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) {
	var h sql.NullFloat64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return h, err
	}
	return h, nil
}

// SelectStr executes the given query, which should be a SELECT statement for a single
// char/varchar column, and returns the value of the first row returned. If no rows are
// found, an empty string is returned.
func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) {
	var h string
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return "", err
	}
	return h, nil
}

// SelectNullStr executes the given query, which should be a SELECT
// statement for a single char/varchar column, and returns the value
// of the first row returned. If no rows are found, the empty
// sql.NullString is returned.
func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) {
	var h sql.NullString
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return h, err
	}
	return h, nil
}

// SelectOne executes the given query (which should be a SELECT statement)
// and binds the result to holder, which must be a pointer.
//
// If no row is found, an error (sql.ErrNoRows specifically) will be returned
//
// If more than one row is found, an error will be returned.
+// +func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error { + t := reflect.TypeOf(holder) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } else { + return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %t", holder) + } + + // Handle pointer to pointer + isptr := false + if t.Kind() == reflect.Ptr { + isptr = true + t = t.Elem() + } + + if t.Kind() == reflect.Struct { + var nonFatalErr error + + list, err := hookedselect(m, e, holder, query, args...) + if err != nil { + if !NonFatalError(err) { + return err + } + nonFatalErr = err + } + + dest := reflect.ValueOf(holder) + if isptr { + dest = dest.Elem() + } + + if list != nil && len(list) > 0 { + // check for multiple rows + if len(list) > 1 { + return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args) + } + + // Initialize if nil + if dest.IsNil() { + dest.Set(reflect.New(t)) + } + + // only one row found + src := reflect.ValueOf(list[0]) + dest.Elem().Set(src.Elem()) + } else { + // No rows found, return a proper error. + return sql.ErrNoRows + } + + return nonFatalErr + } + + return selectVal(e, holder, query, args...) +} + +func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error { + if len(args) == 1 { + switch m := e.(type) { + case *DbMap: + query, args = maybeExpandNamedQuery(m, query, args) + case *Transaction: + query, args = maybeExpandNamedQuery(m.dbmap, query, args) + } + } + rows, err := e.query(query, args...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return sql.ErrNoRows + } + + return rows.Scan(holder) +} + +/////////////// + +func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string, + args ...interface{}) ([]interface{}, error) { + + var nonFatalErr error + + list, err := rawselect(m, exec, i, query, args...) 
	if err != nil {
		if !NonFatalError(err) {
			return nil, err
		}
		nonFatalErr = err
	}

	// Determine where the results are: written to i, or returned in list
	if t, _ := toSliceType(i); t == nil {
		for _, v := range list {
			if v, ok := v.(HasPostGet); ok {
				err := v.PostGet(exec)
				if err != nil {
					return nil, err
				}
			}
		}
	} else {
		resultsValue := reflect.Indirect(reflect.ValueOf(i))
		for i := 0; i < resultsValue.Len(); i++ {
			if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok {
				err := v.PostGet(exec)
				if err != nil {
					return nil, err
				}
			}
		}
	}
	return list, nonFatalErr
}

// rawselect executes the query and binds result columns to struct fields
// (or a single scalar column), either appending to the slice pointed to
// by i or returning the rows in the result list.
func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
	args ...interface{}) ([]interface{}, error) {
	var (
		appendToSlice   = false // Write results to i directly?
		intoStruct      = true  // Selecting into a struct?
		pointerElements = true  // Are the slice elements pointers (vs values)?
	)

	var nonFatalErr error

	// get type for i, verifying it's a supported destination
	t, err := toType(i)
	if err != nil {
		var err2 error
		if t, err2 = toSliceType(i); t == nil {
			if err2 != nil {
				return nil, err2
			}
			return nil, err
		}
		pointerElements = t.Kind() == reflect.Ptr
		if pointerElements {
			t = t.Elem()
		}
		appendToSlice = true
		intoStruct = t.Kind() == reflect.Struct
	}

	// If the caller supplied a single struct/map argument, assume a "named
	// parameter" query. Extract the named arguments from the struct/map, create
	// the flat arg slice, and rewrite the query to use the dialect's placeholder.
	if len(args) == 1 {
		query, args = maybeExpandNamedQuery(m, query, args)
	}

	// Run the query
	rows, err := exec.query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	// Fetch the column names as returned from db
	cols, err := rows.Columns()
	if err != nil {
		return nil, err
	}

	if !intoStruct && len(cols) > 1 {
		return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols))
	}

	var colToFieldIndex [][]int
	if intoStruct {
		if colToFieldIndex, err = columnToFieldIndex(m, t, cols); err != nil {
			if !NonFatalError(err) {
				return nil, err
			}
			nonFatalErr = err
		}
	}

	conv := m.TypeConverter

	// Add results to one of these two slices.
	var (
		list       = make([]interface{}, 0)
		sliceValue = reflect.Indirect(reflect.ValueOf(i))
	)

	for {
		if !rows.Next() {
			// if an error occurred return it
			if rows.Err() != nil {
				return nil, rows.Err()
			}
			// time to exit from outer "for" loop
			break
		}
		v := reflect.New(t)
		dest := make([]interface{}, len(cols))

		custScan := make([]CustomScanner, 0)

		for x := range cols {
			f := v.Elem()
			if intoStruct {
				index := colToFieldIndex[x]
				if index == nil {
					// this field is not present in the struct, so create a dummy
					// value for rows.Scan to scan into
					var dummy sql.RawBytes
					dest[x] = &dummy
					continue
				}
				f = f.FieldByIndex(index)
			}
			target := f.Addr().Interface()
			if conv != nil {
				scanner, ok := conv.FromDb(target)
				if ok {
					target = scanner.Holder
					custScan = append(custScan, scanner)
				}
			}
			dest[x] = target
		}

		err = rows.Scan(dest...)
		if err != nil {
			return nil, err
		}

		// Copy custom-scanned holder values back into the real targets.
		for _, c := range custScan {
			err = c.Bind()
			if err != nil {
				return nil, err
			}
		}

		if appendToSlice {
			if !pointerElements {
				v = v.Elem()
			}
			sliceValue.Set(reflect.Append(sliceValue, v))
		} else {
			list = append(list, v.Interface())
		}
	}

	// Ensure the caller's slice is non-nil even when no rows matched.
	if appendToSlice && sliceValue.IsNil() {
		sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0))
	}

	return list, nonFatalErr
}

// Calls the Exec function on the executor, but attempts to expand any eligible named
// query arguments first.
func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) {
	var dbMap *DbMap
	var executor executor
	switch m := e.(type) {
	case *DbMap:
		executor = m.Db
		dbMap = m
	case *Transaction:
		executor = m.tx
		dbMap = m.dbmap
	}

	if len(args) == 1 {
		query, args = maybeExpandNamedQuery(dbMap, query, args)
	}

	return executor.Exec(query, args...)
}

// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used
// as input to a named query. If so, it rewrites the query to use
// dialect-dependent bindvars and instantiates the corresponding slice of
// parameters by extracting data from the map / struct.
// If not, returns the input values unchanged.
func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) {
	var (
		arg    = args[0]
		argval = reflect.ValueOf(arg)
	)
	if argval.Kind() == reflect.Ptr {
		argval = argval.Elem()
	}

	if argval.Kind() == reflect.Map && argval.Type().Key().Kind() == reflect.String {
		return expandNamedQuery(m, query, func(key string) reflect.Value {
			return argval.MapIndex(reflect.ValueOf(key))
		})
	}
	if argval.Kind() != reflect.Struct {
		return query, args
	}
	if _, ok := arg.(time.Time); ok {
		// time.Time is driver.Value
		return query, args
	}
	if _, ok := arg.(driver.Valuer); ok {
		// driver.Valuer will be converted to driver.Value.
+ return query, args + } + + return expandNamedQuery(m, query, argval.FieldByName) +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. 
all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + fieldName = field.Tag.Get("db") + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. 
+func toSliceType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + if t.Kind() != reflect.Ptr { + // If it's a slice, return a more helpful error message + if t.Kind() == reflect.Slice { + return nil, fmt.Errorf("gorp: Cannot SELECT into a non-pointer slice: %v", t) + } + return nil, nil + } + if t = t.Elem(); t.Kind() != reflect.Slice { + return nil, nil + } + return t.Elem(), nil +} + +func toType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + + // If a Pointer to a type, follow + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("gorp: Cannot SELECT into this type: %v", reflect.TypeOf(i)) + } + return t, nil +} + +func get(m *DbMap, exec SqlExecutor, i interface{}, + keys ...interface{}) (interface{}, error) { + + t, err := toType(i) + if err != nil { + return nil, err + } + + table, err := m.TableFor(t, true) + if err != nil { + return nil, err + } + + plan := table.bindGet() + + v := reflect.New(t) + dest := make([]interface{}, len(plan.argFields)) + + conv := m.TypeConverter + custScan := make([]CustomScanner, 0) + + for x, fieldName := range plan.argFields { + f := v.Elem().FieldByName(fieldName) + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + row := exec.queryRow(plan.query, keys...) + err = row.Scan(dest...) 
+ if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if v, ok := v.Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + + return v.Interface(), nil +} + +func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreDelete); ok { + err = v.PreDelete(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindDelete(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + count += rows + + if v, ok := eval.(HasPostDelete); ok { + err := v.PostDelete(exec) + if err != nil { + return -1, err + } + } + } + + return count, nil +} + +func update(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreUpdate); ok { + err = v.PreUpdate(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindUpdate(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) 
+ } + + if bi.versField != "" { + elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1) + } + + count += rows + + if v, ok := eval.(HasPostUpdate); ok { + err = v.PostUpdate(exec) + if err != nil { + return -1, err + } + } + } + return count, nil +} + +func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error { + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, false) + if err != nil { + return err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreInsert); ok { + err := v.PreInsert(exec) + if err != nil { + return err + } + } + + bi, err := table.bindInsert(elem) + if err != nil { + return err + } + + if bi.autoIncrIdx > -1 { + f := elem.FieldByName(bi.autoIncrFieldName) + switch inserter := m.Dialect.(type) { + case IntegerAutoIncrInserter: + id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...) + if err != nil { + return err + } + k := f.Kind() + if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) { + f.SetInt(id) + } else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) { + f.SetUint(uint64(id)) + } else { + return fmt.Errorf("gorp: Cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName) + } + case TargetedAutoIncrInserter: + err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...) + if err != nil { + return err + } + default: + return fmt.Errorf("gorp: Cannot use autoincrement fields on dialects that do not implement an autoincrementing interface") + } + } else { + _, err := exec.Exec(bi.query, bi.args...) 
+ if err != nil { + return err + } + } + + if v, ok := eval.(HasPostInsert); ok { + err := v.PostInsert(exec) + if err != nil { + return err + } + } + } + return nil +} + +func lockError(m *DbMap, exec SqlExecutor, tableName string, + existingVer int64, elem reflect.Value, + keys ...interface{}) (int64, error) { + + existing, err := get(m, exec, elem.Interface(), keys...) + if err != nil { + return -1, err + } + + ole := OptimisticLockError{tableName, keys, true, existingVer} + if existing == nil { + ole.RowExists = false + } + return -1, ole +} + +// PostUpdate() will be executed after the GET statement. +type HasPostGet interface { + PostGet(SqlExecutor) error +} + +// PostUpdate() will be executed after the DELETE statement +type HasPostDelete interface { + PostDelete(SqlExecutor) error +} + +// PostUpdate() will be executed after the UPDATE statement +type HasPostUpdate interface { + PostUpdate(SqlExecutor) error +} + +// PostInsert() will be executed after the INSERT statement +type HasPostInsert interface { + PostInsert(SqlExecutor) error +} + +// PreDelete() will be executed before the DELETE statement. +type HasPreDelete interface { + PreDelete(SqlExecutor) error +} + +// PreUpdate() will be executed before UPDATE statement. +type HasPreUpdate interface { + PreUpdate(SqlExecutor) error +} + +// PreInsert() will be executed before INSERT statement. 
+type HasPreInsert interface { + PreInsert(SqlExecutor) error +} diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/gorp_test.go b/Godeps/_workspace/src/github.com/go-gorp/gorp/gorp_test.go new file mode 100644 index 000000000..6e5618c1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/gorp_test.go @@ -0,0 +1,2170 @@ +package gorp + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" + "log" + "math/rand" + "os" + "reflect" + "strings" + "testing" + "time" + + _ "github.com/go-sql-driver/mysql" + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" + _ "github.com/ziutek/mymysql/godrv" +) + +// verify interface compliance +var _ Dialect = SqliteDialect{} +var _ Dialect = PostgresDialect{} +var _ Dialect = MySQLDialect{} +var _ Dialect = SqlServerDialect{} +var _ Dialect = OracleDialect{} + +type testable interface { + GetId() int64 + Rand() +} + +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 + IsPaid bool +} + +func (me *Invoice) GetId() int64 { return me.Id } +func (me *Invoice) Rand() { + me.Memo = fmt.Sprintf("random %d", rand.Int63()) + me.Created = rand.Int63() + me.Updated = rand.Int63() +} + +type InvoiceTag struct { + Id int64 `db:"myid"` + Created int64 `db:"myCreated"` + Updated int64 `db:"date_updated"` + Memo string + PersonId int64 `db:"person_id"` + IsPaid bool `db:"is_Paid"` +} + +func (me *InvoiceTag) GetId() int64 { return me.Id } +func (me *InvoiceTag) Rand() { + me.Memo = fmt.Sprintf("random %d", rand.Int63()) + me.Created = rand.Int63() + me.Updated = rand.Int63() +} + +// See: https://github.com/go-gorp/gorp/issues/175 +type AliasTransientField struct { + Id int64 `db:"id"` + Bar int64 `db:"-"` + BarStr string `db:"bar"` +} + +func (me *AliasTransientField) GetId() int64 { return me.Id } +func (me *AliasTransientField) Rand() { + me.BarStr = fmt.Sprintf("random %d", rand.Int63()) +} + +type OverriddenInvoice struct { + Invoice + Id string +} + 
+type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string + Version int64 +} + +type FNameOnly struct { + FName string +} + +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string + LegacyVersion int64 +} + +type TableWithNull struct { + Id int64 + Str sql.NullString + Int64 sql.NullInt64 + Float64 sql.NullFloat64 + Bool sql.NullBool + Bytes []byte +} + +type WithIgnoredColumn struct { + internal int64 `db:"-"` + Id int64 + Created int64 +} + +type IdCreated struct { + Id int64 + Created int64 +} + +type IdCreatedExternal struct { + IdCreated + External int64 +} + +type WithStringPk struct { + Id string + Name string +} + +type CustomStringType string + +type TypeConversionExample struct { + Id int64 + PersonJSON Person + Name CustomStringType +} + +type PersonUInt32 struct { + Id uint32 + Name string +} + +type PersonUInt64 struct { + Id uint64 + Name string +} + +type PersonUInt16 struct { + Id uint16 + Name string +} + +type WithEmbeddedStruct struct { + Id int64 + Names +} + +type WithEmbeddedStructBeforeAutoincrField struct { + Names + Id int64 +} + +type WithEmbeddedAutoincr struct { + WithEmbeddedStruct + MiddleName string +} + +type Names struct { + FirstName string + LastName string +} + +type UniqueColumns struct { + FirstName string + LastName string + City string + ZipCode int64 +} + +type SingleColumnTable struct { + SomeId string +} + +type CustomDate struct { + time.Time +} + +type WithCustomDate struct { + Id int64 + Added CustomDate +} + +type WithNullTime struct { + Id int64 + Time NullTime +} + +type testTypeConverter struct{} + +func (me testTypeConverter) ToDb(val interface{}) (interface{}, error) { + + switch t := val.(type) { + case Person: + b, err := json.Marshal(t) + if err != nil { + return "", err + } + return string(b), nil + case CustomStringType: + return string(t), nil + case CustomDate: + return t.Time, nil + } + + return val, nil +} + +func (me 
testTypeConverter) FromDb(target interface{}) (CustomScanner, bool) { + switch target.(type) { + case *Person: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New("FromDb: Unable to convert Person to *string") + } + b := []byte(*s) + return json.Unmarshal(b, target) + } + return CustomScanner{new(string), target, binder}, true + case *CustomStringType: + binder := func(holder, target interface{}) error { + s, ok := holder.(*string) + if !ok { + return errors.New("FromDb: Unable to convert CustomStringType to *string") + } + st, ok := target.(*CustomStringType) + if !ok { + return errors.New(fmt.Sprint("FromDb: Unable to convert target to *CustomStringType: ", reflect.TypeOf(target))) + } + *st = CustomStringType(*s) + return nil + } + return CustomScanner{new(string), target, binder}, true + case *CustomDate: + binder := func(holder, target interface{}) error { + t, ok := holder.(*time.Time) + if !ok { + return errors.New("FromDb: Unable to convert CustomDate to *time.Time") + } + dateTarget, ok := target.(*CustomDate) + if !ok { + return errors.New(fmt.Sprint("FromDb: Unable to convert target to *CustomDate: ", reflect.TypeOf(target))) + } + dateTarget.Time = *t + return nil + } + return CustomScanner{new(time.Time), target, binder}, true + } + + return CustomScanner{}, false +} + +func (p *Person) PreInsert(s SqlExecutor) error { + p.Created = time.Now().UnixNano() + p.Updated = p.Created + if p.FName == "badname" { + return fmt.Errorf("Invalid name: %s", p.FName) + } + return nil +} + +func (p *Person) PostInsert(s SqlExecutor) error { + p.LName = "postinsert" + return nil +} + +func (p *Person) PreUpdate(s SqlExecutor) error { + p.FName = "preupdate" + return nil +} + +func (p *Person) PostUpdate(s SqlExecutor) error { + p.LName = "postupdate" + return nil +} + +func (p *Person) PreDelete(s SqlExecutor) error { + p.FName = "predelete" + return nil +} + +func (p *Person) PostDelete(s SqlExecutor) error 
{ + p.LName = "postdelete" + return nil +} + +func (p *Person) PostGet(s SqlExecutor) error { + p.LName = "postget" + return nil +} + +type PersistentUser struct { + Key int32 + Id string + PassedTraining bool +} + +func TestCreateTablesIfNotExists(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + err := dbmap.CreateTablesIfNotExists() + if err != nil { + t.Error(err) + } +} + +func TestTruncateTables(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + err := dbmap.CreateTablesIfNotExists() + if err != nil { + t.Error(err) + } + + // Insert some data + p1 := &Person{0, 0, 0, "Bob", "Smith", 0} + dbmap.Insert(p1) + inv := &Invoice{0, 0, 1, "my invoice", 0, true} + dbmap.Insert(inv) + + err = dbmap.TruncateTables() + if err != nil { + t.Error(err) + } + + // Make sure all rows are deleted + rows, _ := dbmap.Select(Person{}, "SELECT * FROM person_test") + if len(rows) != 0 { + t.Errorf("Expected 0 person rows, got %d", len(rows)) + } + rows, _ = dbmap.Select(Invoice{}, "SELECT * FROM invoice_test") + if len(rows) != 0 { + t.Errorf("Expected 0 invoice rows, got %d", len(rows)) + } +} + +func TestCustomDateType(t *testing.T) { + dbmap := newDbMap() + dbmap.TypeConverter = testTypeConverter{} + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTable(WithCustomDate{}).SetKeys(true, "Id") + err := dbmap.CreateTables() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + + test1 := &WithCustomDate{Added: CustomDate{Time: time.Now().Truncate(time.Second)}} + err = dbmap.Insert(test1) + if err != nil { + t.Errorf("Could not insert struct with custom date field: %s", err) + t.FailNow() + } + // Unfortunately, the mysql driver doesn't handle time.Time + // values properly during Get(). I can't find a way to work + // around that problem - every other type that I've tried is just + // silently converted. time.Time is the only type that causes + // the issue that this test checks for. 
As such, if the driver is + // mysql, we'll just skip the rest of this test. + if _, driver := dialectAndDriver(); driver == "mysql" { + t.Skip("TestCustomDateType can't run Get() with the mysql driver; skipping the rest of this test...") + } + result, err := dbmap.Get(new(WithCustomDate), test1.Id) + if err != nil { + t.Errorf("Could not get struct with custom date field: %s", err) + t.FailNow() + } + test2 := result.(*WithCustomDate) + if test2.Added.UTC() != test1.Added.UTC() { + t.Errorf("Custom dates do not match: %v != %v", test2.Added.UTC(), test1.Added.UTC()) + } +} + +func TestUIntPrimaryKey(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTable(PersonUInt64{}).SetKeys(true, "Id") + dbmap.AddTable(PersonUInt32{}).SetKeys(true, "Id") + dbmap.AddTable(PersonUInt16{}).SetKeys(true, "Id") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + + p1 := &PersonUInt64{0, "name1"} + p2 := &PersonUInt32{0, "name2"} + p3 := &PersonUInt16{0, "name3"} + err = dbmap.Insert(p1, p2, p3) + if err != nil { + t.Error(err) + } + if p1.Id != 1 { + t.Errorf("%d != 1", p1.Id) + } + if p2.Id != 1 { + t.Errorf("%d != 1", p2.Id) + } + if p3.Id != 1 { + t.Errorf("%d != 1", p3.Id) + } +} + +func TestSetUniqueTogether(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTable(UniqueColumns{}).SetUniqueTogether("FirstName", "LastName").SetUniqueTogether("City", "ZipCode") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + + n1 := &UniqueColumns{"Steve", "Jobs", "Cupertino", 95014} + err = dbmap.Insert(n1) + if err != nil { + t.Error(err) + } + + // Should fail because of the first constraint + n2 := &UniqueColumns{"Steve", "Jobs", "Sunnyvale", 94085} + err = dbmap.Insert(n2) + if err == nil { + t.Error(err) + } + // "unique" for 
Postgres/SQLite, "Duplicate entry" for MySQL + errLower := strings.ToLower(err.Error()) + if !strings.Contains(errLower, "unique") && !strings.Contains(errLower, "duplicate entry") { + t.Error(err) + } + + // Should also fail because of the second unique-together + n3 := &UniqueColumns{"Steve", "Wozniak", "Cupertino", 95014} + err = dbmap.Insert(n3) + if err == nil { + t.Error(err) + } + // "unique" for Postgres/SQLite, "Duplicate entry" for MySQL + errLower = strings.ToLower(err.Error()) + if !strings.Contains(errLower, "unique") && !strings.Contains(errLower, "duplicate entry") { + t.Error(err) + } + + // This one should finally succeed + n4 := &UniqueColumns{"Steve", "Wozniak", "Sunnyvale", 94085} + err = dbmap.Insert(n4) + if err != nil { + t.Error(err) + } +} + +func TestPersistentUser(t *testing.T) { + dbmap := newDbMap() + dbmap.Exec("drop table if exists PersistentUser") + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + table := dbmap.AddTable(PersistentUser{}).SetKeys(false, "Key") + table.ColMap("Key").Rename("mykey") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + pu := &PersistentUser{43, "33r", false} + err = dbmap.Insert(pu) + if err != nil { + panic(err) + } + + // prove we can pass a pointer into Get + pu2, err := dbmap.Get(pu, pu.Key) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(pu, pu2) { + t.Errorf("%v!=%v", pu, pu2) + } + + arr, err := dbmap.Select(pu, "select * from PersistentUser") + if err != nil { + panic(err) + } + if !reflect.DeepEqual(pu, arr[0]) { + t.Errorf("%v!=%v", pu, arr[0]) + } + + // prove we can get the results back in a slice + var puArr []*PersistentUser + _, err = dbmap.Select(&puArr, "select * from PersistentUser") + if err != nil { + panic(err) + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu, puArr[0]) { + t.Errorf("%v!=%v", pu, puArr[0]) + } + + // prove we can get 
the results back in a non-pointer slice + var puValues []PersistentUser + _, err = dbmap.Select(&puValues, "select * from PersistentUser") + if err != nil { + panic(err) + } + if len(puValues) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(*pu, puValues[0]) { + t.Errorf("%v!=%v", *pu, puValues[0]) + } + + // prove we can get the results back in a string slice + var idArr []*string + _, err = dbmap.Select(&idArr, "select Id from PersistentUser") + if err != nil { + panic(err) + } + if len(idArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.Id, *idArr[0]) { + t.Errorf("%v!=%v", pu.Id, *idArr[0]) + } + + // prove we can get the results back in an int slice + var keyArr []*int32 + _, err = dbmap.Select(&keyArr, "select mykey from PersistentUser") + if err != nil { + panic(err) + } + if len(keyArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.Key, *keyArr[0]) { + t.Errorf("%v!=%v", pu.Key, *keyArr[0]) + } + + // prove we can get the results back in a bool slice + var passedArr []*bool + _, err = dbmap.Select(&passedArr, "select PassedTraining from PersistentUser") + if err != nil { + panic(err) + } + if len(passedArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.PassedTraining, *passedArr[0]) { + t.Errorf("%v!=%v", pu.PassedTraining, *passedArr[0]) + } + + // prove we can get the results back in a non-pointer slice + var stringArr []string + _, err = dbmap.Select(&stringArr, "select Id from PersistentUser") + if err != nil { + panic(err) + } + if len(stringArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu.Id, stringArr[0]) { + t.Errorf("%v!=%v", pu.Id, stringArr[0]) + } +} + +func TestNamedQueryMap(t *testing.T) { + dbmap := newDbMap() + dbmap.Exec("drop table if exists PersistentUser") + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", 
log.Lmicroseconds)) + table := dbmap.AddTable(PersistentUser{}).SetKeys(false, "Key") + table.ColMap("Key").Rename("mykey") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + pu := &PersistentUser{43, "33r", false} + pu2 := &PersistentUser{500, "abc", false} + err = dbmap.Insert(pu, pu2) + if err != nil { + panic(err) + } + + // Test simple case + var puArr []*PersistentUser + _, err = dbmap.Select(&puArr, "select * from PersistentUser where mykey = :Key", map[string]interface{}{ + "Key": 43, + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu, puArr[0]) { + t.Errorf("%v!=%v", pu, puArr[0]) + } + + // Test more specific map value type is ok + puArr = nil + _, err = dbmap.Select(&puArr, "select * from PersistentUser where mykey = :Key", map[string]int{ + "Key": 43, + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + + // Test multiple parameters set. + puArr = nil + _, err = dbmap.Select(&puArr, ` +select * from PersistentUser + where mykey = :Key + and PassedTraining = :PassedTraining + and Id = :Id`, map[string]interface{}{ + "Key": 43, + "PassedTraining": false, + "Id": "33r", + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + + // Test colon within a non-key string + // Test having extra, unused properties in the map. 
+ puArr = nil + _, err = dbmap.Select(&puArr, ` +select * from PersistentUser + where mykey = :Key + and Id != 'abc:def'`, map[string]interface{}{ + "Key": 43, + "PassedTraining": false, + }) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + + // Test to delete with Exec and named params. + result, err := dbmap.Exec("delete from PersistentUser where mykey = :Key", map[string]interface{}{ + "Key": 43, + }) + count, err := result.RowsAffected() + if err != nil { + t.Errorf("Failed to exec: %s", err) + t.FailNow() + } + if count != 1 { + t.Errorf("Expected 1 persistentuser to be deleted, but %d deleted", count) + } +} + +func TestNamedQueryStruct(t *testing.T) { + dbmap := newDbMap() + dbmap.Exec("drop table if exists PersistentUser") + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + table := dbmap.AddTable(PersistentUser{}).SetKeys(false, "Key") + table.ColMap("Key").Rename("mykey") + err := dbmap.CreateTablesIfNotExists() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + pu := &PersistentUser{43, "33r", false} + pu2 := &PersistentUser{500, "abc", false} + err = dbmap.Insert(pu, pu2) + if err != nil { + panic(err) + } + + // Test select self + var puArr []*PersistentUser + _, err = dbmap.Select(&puArr, ` +select * from PersistentUser + where mykey = :Key + and PassedTraining = :PassedTraining + and Id = :Id`, pu) + if err != nil { + t.Errorf("Failed to select: %s", err) + t.FailNow() + } + if len(puArr) != 1 { + t.Errorf("Expected one persistentuser, found none") + } + if !reflect.DeepEqual(pu, puArr[0]) { + t.Errorf("%v!=%v", pu, puArr[0]) + } + + // Test delete self. 
+ result, err := dbmap.Exec(` +delete from PersistentUser + where mykey = :Key + and PassedTraining = :PassedTraining + and Id = :Id`, pu) + count, err := result.RowsAffected() + if err != nil { + t.Errorf("Failed to exec: %s", err) + t.FailNow() + } + if count != 1 { + t.Errorf("Expected 1 persistentuser to be deleted, but %d deleted", count) + } +} + +// Ensure that the slices containing SQL results are non-nil when the result set is empty. +func TestReturnsNonNilSlice(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + noResultsSQL := "select * from invoice_test where id=99999" + var r1 []*Invoice + _rawselect(dbmap, &r1, noResultsSQL) + if r1 == nil { + t.Errorf("r1==nil") + } + + r2 := _rawselect(dbmap, Invoice{}, noResultsSQL) + if r2 == nil { + t.Errorf("r2==nil") + } +} + +func TestOverrideVersionCol(t *testing.T) { + dbmap := newDbMap() + t1 := dbmap.AddTable(InvoicePersonView{}).SetKeys(false, "InvoiceId", "PersonId") + err := dbmap.CreateTables() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + c1 := t1.SetVersionCol("LegacyVersion") + if c1.ColumnName != "LegacyVersion" { + t.Errorf("Wrong col returned: %v", c1) + } + + ipv := &InvoicePersonView{1, 2, "memo", "fname", 0} + _update(dbmap, ipv) + if ipv.LegacyVersion != 1 { + t.Errorf("LegacyVersion not updated: %d", ipv.LegacyVersion) + } +} + +func TestOptimisticLocking(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "Bob", "Smith", 0} + dbmap.Insert(p1) // Version is now 1 + if p1.Version != 1 { + t.Errorf("Insert didn't incr Version: %d != %d", 1, p1.Version) + return + } + if p1.Id == 0 { + t.Errorf("Insert didn't return a generated PK") + return + } + + obj, err := dbmap.Get(Person{}, p1.Id) + if err != nil { + panic(err) + } + p2 := obj.(*Person) + p2.LName = "Edwards" + dbmap.Update(p2) // Version is now 2 + if p2.Version != 2 { + t.Errorf("Update didn't incr Version: %d != %d", 2, p2.Version) + } + + p1.LName = 
"Howard" + count, err := dbmap.Update(p1) + if _, ok := err.(OptimisticLockError); !ok { + t.Errorf("update - Expected OptimisticLockError, got: %v", err) + } + if count != -1 { + t.Errorf("update - Expected -1 count, got: %d", count) + } + + count, err = dbmap.Delete(p1) + if _, ok := err.(OptimisticLockError); !ok { + t.Errorf("delete - Expected OptimisticLockError, got: %v", err) + } + if count != -1 { + t.Errorf("delete - Expected -1 count, got: %d", count) + } +} + +// what happens if a legacy table has a null value? +func TestDoubleAddTable(t *testing.T) { + dbmap := newDbMap() + t1 := dbmap.AddTable(TableWithNull{}).SetKeys(false, "Id") + t2 := dbmap.AddTable(TableWithNull{}) + if t1 != t2 { + t.Errorf("%v != %v", t1, t2) + } +} + +// what happens if a legacy table has a null value? +func TestNullValues(t *testing.T) { + dbmap := initDbMapNulls() + defer dropAndClose(dbmap) + + // insert a row directly + _rawexec(dbmap, "insert into TableWithNull values (10, null, "+ + "null, null, null, null)") + + // try to load it + expected := &TableWithNull{Id: 10} + obj := _get(dbmap, TableWithNull{}, 10) + t1 := obj.(*TableWithNull) + if !reflect.DeepEqual(expected, t1) { + t.Errorf("%v != %v", expected, t1) + } + + // update it + t1.Str = sql.NullString{"hi", true} + expected.Str = t1.Str + t1.Int64 = sql.NullInt64{999, true} + expected.Int64 = t1.Int64 + t1.Float64 = sql.NullFloat64{53.33, true} + expected.Float64 = t1.Float64 + t1.Bool = sql.NullBool{true, true} + expected.Bool = t1.Bool + t1.Bytes = []byte{1, 30, 31, 33} + expected.Bytes = t1.Bytes + _update(dbmap, t1) + + obj = _get(dbmap, TableWithNull{}, 10) + t1 = obj.(*TableWithNull) + if t1.Str.String != "hi" { + t.Errorf("%s != hi", t1.Str.String) + } + if !reflect.DeepEqual(expected, t1) { + t.Errorf("%v != %v", expected, t1) + } +} + +func TestColumnProps(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + t1 := 
dbmap.AddTable(Invoice{}).SetKeys(true, "Id") + t1.ColMap("Created").Rename("date_created") + t1.ColMap("Updated").SetTransient(true) + t1.ColMap("Memo").SetMaxSize(10) + t1.ColMap("PersonId").SetUnique(true) + + err := dbmap.CreateTables() + if err != nil { + panic(err) + } + defer dropAndClose(dbmap) + + // test transient + inv := &Invoice{0, 0, 1, "my invoice", 0, true} + _insert(dbmap, inv) + obj := _get(dbmap, Invoice{}, inv.Id) + inv = obj.(*Invoice) + if inv.Updated != 0 { + t.Errorf("Saved transient column 'Updated'") + } + + // test max size + inv.Memo = "this memo is too long" + err = dbmap.Insert(inv) + if err == nil { + t.Errorf("max size exceeded, but Insert did not fail.") + } + + // test unique - same person id + inv = &Invoice{0, 0, 1, "my invoice2", 0, false} + err = dbmap.Insert(inv) + if err == nil { + t.Errorf("same PersonId inserted, but Insert did not fail.") + } +} + +func TestRawSelect(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "bob", "smith", 0} + _insert(dbmap, p1) + + inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id, true} + _insert(dbmap, inv1) + + expected := &InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName, 0} + + query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + list := _rawselect(dbmap, InvoicePersonView{}, query) + if len(list) != 1 { + t.Errorf("len(list) != 1: %d", len(list)) + } else if !reflect.DeepEqual(expected, list[0]) { + t.Errorf("%v != %v", expected, list[0]) + } +} + +func TestHooks(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "bob", "smith", 0} + _insert(dbmap, p1) + if p1.Created == 0 || p1.Updated == 0 { + t.Errorf("p1.PreInsert() didn't run: %v", p1) + } else if p1.LName != "postinsert" { + t.Errorf("p1.PostInsert() didn't run: %v", p1) + } + + obj := _get(dbmap, Person{}, p1.Id) + p1 = obj.(*Person) + if p1.LName != "postget" 
{ + t.Errorf("p1.PostGet() didn't run: %v", p1) + } + + _update(dbmap, p1) + if p1.FName != "preupdate" { + t.Errorf("p1.PreUpdate() didn't run: %v", p1) + } else if p1.LName != "postupdate" { + t.Errorf("p1.PostUpdate() didn't run: %v", p1) + } + + var persons []*Person + bindVar := dbmap.Dialect.BindVar(0) + _rawselect(dbmap, &persons, "select * from person_test where id = "+bindVar, p1.Id) + if persons[0].LName != "postget" { + t.Errorf("p1.PostGet() didn't run after select: %v", p1) + } + + _del(dbmap, p1) + if p1.FName != "predelete" { + t.Errorf("p1.PreDelete() didn't run: %v", p1) + } else if p1.LName != "postdelete" { + t.Errorf("p1.PostDelete() didn't run: %v", p1) + } + + // Test error case + p2 := &Person{0, 0, 0, "badname", "", 0} + err := dbmap.Insert(p2) + if err == nil { + t.Errorf("p2.PreInsert() didn't return an error") + } +} + +func TestTransaction(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "t1", 0, true} + inv2 := &Invoice{0, 100, 200, "t2", 0, false} + + trans, err := dbmap.Begin() + if err != nil { + panic(err) + } + trans.Insert(inv1, inv2) + err = trans.Commit() + if err != nil { + panic(err) + } + + obj, err := dbmap.Get(Invoice{}, inv1.Id) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(inv1, obj) { + t.Errorf("%v != %v", inv1, obj) + } + obj, err = dbmap.Get(Invoice{}, inv2.Id) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(inv2, obj) { + t.Errorf("%v != %v", inv2, obj) + } +} + +func TestSavepoint(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "unpaid", 0, false} + + trans, err := dbmap.Begin() + if err != nil { + panic(err) + } + trans.Insert(inv1) + + var checkMemo = func(want string) { + memo, err := trans.SelectStr("select memo from invoice_test") + if err != nil { + panic(err) + } + if memo != want { + t.Errorf("%q != %q", want, memo) + } + } + checkMemo("unpaid") + + err = trans.Savepoint("foo") + 
if err != nil { + panic(err) + } + checkMemo("unpaid") + + inv1.Memo = "paid" + _, err = trans.Update(inv1) + if err != nil { + panic(err) + } + checkMemo("paid") + + err = trans.RollbackToSavepoint("foo") + if err != nil { + panic(err) + } + checkMemo("unpaid") + + err = trans.Rollback() + if err != nil { + panic(err) + } +} + +func TestMultiple(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "a", 0, false} + inv2 := &Invoice{0, 100, 200, "b", 0, true} + _insert(dbmap, inv1, inv2) + + inv1.Memo = "c" + inv2.Memo = "d" + _update(dbmap, inv1, inv2) + + count := _del(dbmap, inv1, inv2) + if count != 2 { + t.Errorf("%d != 2", count) + } +} + +func TestCrud(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + inv := &Invoice{0, 100, 200, "first order", 0, true} + testCrudInternal(t, dbmap, inv) + + invtag := &InvoiceTag{0, 300, 400, "some order", 33, false} + testCrudInternal(t, dbmap, invtag) + + foo := &AliasTransientField{BarStr: "some bar"} + testCrudInternal(t, dbmap, foo) +} + +func testCrudInternal(t *testing.T, dbmap *DbMap, val testable) { + table, _, err := dbmap.tableForPointer(val, false) + if err != nil { + t.Errorf("couldn't call TableFor: val=%v err=%v", val, err) + } + + _, err = dbmap.Exec("delete from " + table.TableName) + if err != nil { + t.Errorf("couldn't delete rows from: val=%v err=%v", val, err) + } + + // INSERT row + _insert(dbmap, val) + if val.GetId() == 0 { + t.Errorf("val.GetId() was not set on INSERT") + return + } + + // SELECT row + val2 := _get(dbmap, val, val.GetId()) + if !reflect.DeepEqual(val, val2) { + t.Errorf("%v != %v", val, val2) + } + + // UPDATE row and SELECT + val.Rand() + count := _update(dbmap, val) + if count != 1 { + t.Errorf("update 1 != %d", count) + } + val2 = _get(dbmap, val, val.GetId()) + if !reflect.DeepEqual(val, val2) { + t.Errorf("%v != %v", val, val2) + } + + // Select * + rows, err := dbmap.Select(val, "select * from "+table.TableName) 
+ if err != nil { + t.Errorf("couldn't select * from %s err=%v", table.TableName, err) + } else if len(rows) != 1 { + t.Errorf("unexpected row count in %s: %d", table.TableName, len(rows)) + } else if !reflect.DeepEqual(val, rows[0]) { + t.Errorf("select * result: %v != %v", val, rows[0]) + } + + // DELETE row + deleted := _del(dbmap, val) + if deleted != 1 { + t.Errorf("Did not delete row with Id: %d", val.GetId()) + return + } + + // VERIFY deleted + val2 = _get(dbmap, val, val.GetId()) + if val2 != nil { + t.Errorf("Found invoice with id: %d after Delete()", val.GetId()) + } +} + +func TestWithIgnoredColumn(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + ic := &WithIgnoredColumn{-1, 0, 1} + _insert(dbmap, ic) + expected := &WithIgnoredColumn{0, 1, 1} + ic2 := _get(dbmap, WithIgnoredColumn{}, ic.Id).(*WithIgnoredColumn) + + if !reflect.DeepEqual(expected, ic2) { + t.Errorf("%v != %v", expected, ic2) + } + if _del(dbmap, ic) != 1 { + t.Errorf("Did not delete row with Id: %d", ic.Id) + return + } + if _get(dbmap, WithIgnoredColumn{}, ic.Id) != nil { + t.Errorf("Found id: %d after Delete()", ic.Id) + } +} + +func TestTypeConversionExample(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p := Person{FName: "Bob", LName: "Smith"} + tc := &TypeConversionExample{-1, p, CustomStringType("hi")} + _insert(dbmap, tc) + + expected := &TypeConversionExample{1, p, CustomStringType("hi")} + tc2 := _get(dbmap, TypeConversionExample{}, tc.Id).(*TypeConversionExample) + if !reflect.DeepEqual(expected, tc2) { + t.Errorf("tc2 %v != %v", expected, tc2) + } + + tc2.Name = CustomStringType("hi2") + tc2.PersonJSON = Person{FName: "Jane", LName: "Doe"} + _update(dbmap, tc2) + + expected = &TypeConversionExample{1, tc2.PersonJSON, CustomStringType("hi2")} + tc3 := _get(dbmap, TypeConversionExample{}, tc.Id).(*TypeConversionExample) + if !reflect.DeepEqual(expected, tc3) { + t.Errorf("tc3 %v != %v", expected, tc3) + } + + if _del(dbmap, tc) != 1 
{ + t.Errorf("Did not delete row with Id: %d", tc.Id) + } + +} + +func TestWithEmbeddedStruct(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} + _insert(dbmap, es) + expected := &WithEmbeddedStruct{1, Names{FirstName: "Alice", LastName: "Smith"}} + es2 := _get(dbmap, WithEmbeddedStruct{}, es.Id).(*WithEmbeddedStruct) + if !reflect.DeepEqual(expected, es2) { + t.Errorf("%v != %v", expected, es2) + } + + es2.FirstName = "Bob" + expected.FirstName = "Bob" + _update(dbmap, es2) + es2 = _get(dbmap, WithEmbeddedStruct{}, es.Id).(*WithEmbeddedStruct) + if !reflect.DeepEqual(expected, es2) { + t.Errorf("%v != %v", expected, es2) + } + + ess := _rawselect(dbmap, WithEmbeddedStruct{}, "select * from embedded_struct_test") + if !reflect.DeepEqual(es2, ess[0]) { + t.Errorf("%v != %v", es2, ess[0]) + } +} + +func TestWithEmbeddedStructBeforeAutoincr(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + esba := &WithEmbeddedStructBeforeAutoincrField{Names: Names{FirstName: "Alice", LastName: "Smith"}} + _insert(dbmap, esba) + var expectedAutoincrId int64 = 1 + if esba.Id != expectedAutoincrId { + t.Errorf("%d != %d", expectedAutoincrId, esba.Id) + } +} + +func TestWithEmbeddedAutoincr(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + esa := &WithEmbeddedAutoincr{ + WithEmbeddedStruct: WithEmbeddedStruct{Names: Names{FirstName: "Alice", LastName: "Smith"}}, + MiddleName: "Rose", + } + _insert(dbmap, esa) + var expectedAutoincrId int64 = 1 + if esa.Id != expectedAutoincrId { + t.Errorf("%d != %d", expectedAutoincrId, esa.Id) + } +} + +func TestSelectVal(t *testing.T) { + dbmap := initDbMapNulls() + defer dropAndClose(dbmap) + + bindVar := dbmap.Dialect.BindVar(0) + + t1 := TableWithNull{Str: sql.NullString{"abc", true}, + Int64: sql.NullInt64{78, true}, + Float64: sql.NullFloat64{32.2, true}, + Bool: sql.NullBool{true, true}, + Bytes: 
[]byte("hi")} + _insert(dbmap, &t1) + + // SelectInt + i64 := selectInt(dbmap, "select Int64 from TableWithNull where Str='abc'") + if i64 != 78 { + t.Errorf("int64 %d != 78", i64) + } + i64 = selectInt(dbmap, "select count(*) from TableWithNull") + if i64 != 1 { + t.Errorf("int64 count %d != 1", i64) + } + i64 = selectInt(dbmap, "select count(*) from TableWithNull where Str="+bindVar, "asdfasdf") + if i64 != 0 { + t.Errorf("int64 no rows %d != 0", i64) + } + + // SelectNullInt + n := selectNullInt(dbmap, "select Int64 from TableWithNull where Str='notfound'") + if !reflect.DeepEqual(n, sql.NullInt64{0, false}) { + t.Errorf("nullint %v != 0,false", n) + } + + n = selectNullInt(dbmap, "select Int64 from TableWithNull where Str='abc'") + if !reflect.DeepEqual(n, sql.NullInt64{78, true}) { + t.Errorf("nullint %v != 78, true", n) + } + + // SelectFloat + f64 := selectFloat(dbmap, "select Float64 from TableWithNull where Str='abc'") + if f64 != 32.2 { + t.Errorf("float64 %d != 32.2", f64) + } + f64 = selectFloat(dbmap, "select min(Float64) from TableWithNull") + if f64 != 32.2 { + t.Errorf("float64 min %d != 32.2", f64) + } + f64 = selectFloat(dbmap, "select count(*) from TableWithNull where Str="+bindVar, "asdfasdf") + if f64 != 0 { + t.Errorf("float64 no rows %d != 0", f64) + } + + // SelectNullFloat + nf := selectNullFloat(dbmap, "select Float64 from TableWithNull where Str='notfound'") + if !reflect.DeepEqual(nf, sql.NullFloat64{0, false}) { + t.Errorf("nullfloat %v != 0,false", nf) + } + + nf = selectNullFloat(dbmap, "select Float64 from TableWithNull where Str='abc'") + if !reflect.DeepEqual(nf, sql.NullFloat64{32.2, true}) { + t.Errorf("nullfloat %v != 32.2, true", nf) + } + + // SelectStr + s := selectStr(dbmap, "select Str from TableWithNull where Int64="+bindVar, 78) + if s != "abc" { + t.Errorf("s %s != abc", s) + } + s = selectStr(dbmap, "select Str from TableWithNull where Str='asdfasdf'") + if s != "" { + t.Errorf("s no rows %s != ''", s) + } + + // 
SelectNullStr + ns := selectNullStr(dbmap, "select Str from TableWithNull where Int64="+bindVar, 78) + if !reflect.DeepEqual(ns, sql.NullString{"abc", true}) { + t.Errorf("nullstr %v != abc,true", ns) + } + ns = selectNullStr(dbmap, "select Str from TableWithNull where Str='asdfasdf'") + if !reflect.DeepEqual(ns, sql.NullString{"", false}) { + t.Errorf("nullstr no rows %v != '',false", ns) + } + + // SelectInt/Str with named parameters + i64 = selectInt(dbmap, "select Int64 from TableWithNull where Str=:abc", map[string]string{"abc": "abc"}) + if i64 != 78 { + t.Errorf("int64 %d != 78", i64) + } + ns = selectNullStr(dbmap, "select Str from TableWithNull where Int64=:num", map[string]int{"num": 78}) + if !reflect.DeepEqual(ns, sql.NullString{"abc", true}) { + t.Errorf("nullstr %v != abc,true", ns) + } +} + +func TestVersionMultipleRows(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + persons := []*Person{ + &Person{0, 0, 0, "Bob", "Smith", 0}, + &Person{0, 0, 0, "Jane", "Smith", 0}, + &Person{0, 0, 0, "Mike", "Smith", 0}, + } + + _insert(dbmap, persons[0], persons[1], persons[2]) + + for x, p := range persons { + if p.Version != 1 { + t.Errorf("person[%d].Version != 1: %d", x, p.Version) + } + } +} + +func TestWithStringPk(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTableWithName(WithStringPk{}, "string_pk_test").SetKeys(true, "Id") + _, err := dbmap.Exec("create table string_pk_test (Id varchar(255), Name varchar(255));") + if err != nil { + t.Errorf("couldn't create string_pk_test: %v", err) + } + defer dropAndClose(dbmap) + + row := &WithStringPk{"1", "foo"} + err = dbmap.Insert(row) + if err == nil { + t.Errorf("Expected error when inserting into table w/non Int PK and autoincr set true") + } +} + +// TestSqlExecutorInterfaceSelects ensures that all DbMap methods starting with Select... +// are also exposed in the SqlExecutor interface. Select... 
functions can always +// run on Pre/Post hooks. +func TestSqlExecutorInterfaceSelects(t *testing.T) { + dbMapType := reflect.TypeOf(&DbMap{}) + sqlExecutorType := reflect.TypeOf((*SqlExecutor)(nil)).Elem() + numDbMapMethods := dbMapType.NumMethod() + for i := 0; i < numDbMapMethods; i += 1 { + dbMapMethod := dbMapType.Method(i) + if !strings.HasPrefix(dbMapMethod.Name, "Select") { + continue + } + if _, found := sqlExecutorType.MethodByName(dbMapMethod.Name); !found { + t.Errorf("Method %s is defined on DbMap but not implemented in SqlExecutor", + dbMapMethod.Name) + } + } +} + +func TestNullTime(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + // if time is null + ent := &WithNullTime{ + Id: 0, + Time: NullTime{ + Valid: false, + }} + err := dbmap.Insert(ent) + if err != nil { + t.Error("failed insert on %s", err.Error()) + } + err = dbmap.SelectOne(ent, `select * from nulltime_test where Id=:Id`, map[string]interface{}{ + "Id": ent.Id, + }) + if err != nil { + t.Error("failed select on %s", err.Error()) + } + if ent.Time.Valid { + t.Error("NullTime returns valid but expected null.") + } + + // if time is not null + ts, err := time.Parse(time.Stamp, "Jan 2 15:04:05") + ent = &WithNullTime{ + Id: 1, + Time: NullTime{ + Valid: true, + Time: ts, + }} + err = dbmap.Insert(ent) + if err != nil { + t.Error("failed insert on %s", err.Error()) + } + err = dbmap.SelectOne(ent, `select * from nulltime_test where Id=:Id`, map[string]interface{}{ + "Id": ent.Id, + }) + if err != nil { + t.Error("failed select on %s", err.Error()) + } + if !ent.Time.Valid { + t.Error("NullTime returns invalid but expected valid.") + } + if ent.Time.Time.UTC() != ts.UTC() { + t.Errorf("expect %v but got %v.", ts, ent.Time.Time) + } + + return +} + +type WithTime struct { + Id int64 + Time time.Time +} + +type Times struct { + One time.Time + Two time.Time +} + +type EmbeddedTime struct { + Id string + Times +} + +func parseTimeOrPanic(format, date string) time.Time { + t1, 
err := time.Parse(format, date) + if err != nil { + panic(err) + } + return t1 +} + +// TODO: re-enable next two tests when this is merged: +// https://github.com/ziutek/mymysql/pull/77 +// +// This test currently fails w/MySQL b/c tz info is lost +func testWithTime(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + t1 := parseTimeOrPanic("2006-01-02 15:04:05 -0700 MST", + "2013-08-09 21:30:43 +0800 CST") + w1 := WithTime{1, t1} + _insert(dbmap, &w1) + + obj := _get(dbmap, WithTime{}, w1.Id) + w2 := obj.(*WithTime) + if w1.Time.UnixNano() != w2.Time.UnixNano() { + t.Errorf("%v != %v", w1, w2) + } +} + +// See: https://github.com/go-gorp/gorp/issues/86 +func testEmbeddedTime(t *testing.T) { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTable(EmbeddedTime{}).SetKeys(false, "Id") + defer dropAndClose(dbmap) + err := dbmap.CreateTables() + if err != nil { + t.Fatal(err) + } + + time1 := parseTimeOrPanic("2006-01-02 15:04:05", "2013-08-09 21:30:43") + + t1 := &EmbeddedTime{Id: "abc", Times: Times{One: time1, Two: time1.Add(10 * time.Second)}} + _insert(dbmap, t1) + + x := _get(dbmap, EmbeddedTime{}, t1.Id) + t2, _ := x.(*EmbeddedTime) + if t1.One.UnixNano() != t2.One.UnixNano() || t1.Two.UnixNano() != t2.Two.UnixNano() { + t.Errorf("%v != %v", t1, t2) + } +} + +func TestWithTimeSelect(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + halfhourago := time.Now().UTC().Add(-30 * time.Minute) + + w1 := WithTime{1, halfhourago.Add(time.Minute * -1)} + w2 := WithTime{2, halfhourago.Add(time.Second)} + _insert(dbmap, &w1, &w2) + + var caseIds []int64 + _, err := dbmap.Select(&caseIds, "SELECT id FROM time_test WHERE Time < "+dbmap.Dialect.BindVar(0), halfhourago) + + if err != nil { + t.Error(err) + } + if len(caseIds) != 1 { + t.Errorf("%d != 1", len(caseIds)) + } + if caseIds[0] != w1.Id { + t.Errorf("%d != %d", caseIds[0], w1.Id) + } +} + +func TestInvoicePersonView(t 
*testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + // Create some rows + p1 := &Person{0, 0, 0, "bob", "smith", 0} + dbmap.Insert(p1) + + // notice how we can wire up p1.Id to the invoice easily + inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id, false} + dbmap.Insert(inv1) + + // Run your query + query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + + // pass a slice of pointers to Select() + // this avoids the need to type assert after the query is run + var list []*InvoicePersonView + _, err := dbmap.Select(&list, query) + if err != nil { + panic(err) + } + + // this should test true + expected := &InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName, 0} + if !reflect.DeepEqual(list[0], expected) { + t.Errorf("%v != %v", list[0], expected) + } +} + +func TestQuoteTableNames(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + quotedTableName := dbmap.Dialect.QuoteField("person_test") + + // Use a buffer to hold the log to check generated queries + logBuffer := &bytes.Buffer{} + dbmap.TraceOn("", log.New(logBuffer, "gorptest:", log.Lmicroseconds)) + + // Create some rows + p1 := &Person{0, 0, 0, "bob", "smith", 0} + errorTemplate := "Expected quoted table name %v in query but didn't find it" + + // Check if Insert quotes the table name + id := dbmap.Insert(p1) + if !bytes.Contains(logBuffer.Bytes(), []byte(quotedTableName)) { + t.Errorf(errorTemplate, quotedTableName) + } + logBuffer.Reset() + + // Check if Get quotes the table name + dbmap.Get(Person{}, id) + if !bytes.Contains(logBuffer.Bytes(), []byte(quotedTableName)) { + t.Errorf(errorTemplate, quotedTableName) + } + logBuffer.Reset() +} + +func TestSelectTooManyCols(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "bob", "smith", 0} + p2 := &Person{0, 0, 0, "jane", "doe", 0} + _insert(dbmap, p1) + _insert(dbmap, p2) + + obj := _get(dbmap, 
Person{}, p1.Id) + p1 = obj.(*Person) + obj = _get(dbmap, Person{}, p2.Id) + p2 = obj.(*Person) + + params := map[string]interface{}{ + "Id": p1.Id, + } + + var p3 FNameOnly + err := dbmap.SelectOne(&p3, "select * from person_test where Id=:Id", params) + if err != nil { + if !NonFatalError(err) { + t.Error(err) + } + } else { + t.Errorf("Non-fatal error expected") + } + + if p1.FName != p3.FName { + t.Errorf("%v != %v", p1.FName, p3.FName) + } + + var pSlice []FNameOnly + _, err = dbmap.Select(&pSlice, "select * from person_test order by fname asc") + if err != nil { + if !NonFatalError(err) { + t.Error(err) + } + } else { + t.Errorf("Non-fatal error expected") + } + + if p1.FName != pSlice[0].FName { + t.Errorf("%v != %v", p1.FName, pSlice[0].FName) + } + if p2.FName != pSlice[1].FName { + t.Errorf("%v != %v", p2.FName, pSlice[1].FName) + } +} + +func TestSelectSingleVal(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &Person{0, 0, 0, "bob", "smith", 0} + _insert(dbmap, p1) + + obj := _get(dbmap, Person{}, p1.Id) + p1 = obj.(*Person) + + params := map[string]interface{}{ + "Id": p1.Id, + } + + var p2 Person + err := dbmap.SelectOne(&p2, "select * from person_test where Id=:Id", params) + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(p1, &p2) { + t.Errorf("%v != %v", p1, &p2) + } + + // verify SelectOne allows non-struct holders + var s string + err = dbmap.SelectOne(&s, "select FName from person_test where Id=:Id", params) + if err != nil { + t.Error(err) + } + if s != "bob" { + t.Error("Expected bob but got: " + s) + } + + // verify SelectOne requires pointer receiver + err = dbmap.SelectOne(s, "select FName from person_test where Id=:Id", params) + if err == nil { + t.Error("SelectOne should have returned error for non-pointer holder") + } + + // verify SelectOne works with uninitialized pointers + var p3 *Person + err = dbmap.SelectOne(&p3, "select * from person_test where Id=:Id", params) + if err != nil { + 
t.Error(err) + } + + if !reflect.DeepEqual(p1, p3) { + t.Errorf("%v != %v", p1, p3) + } + + // verify that the receiver is still nil if nothing was found + var p4 *Person + dbmap.SelectOne(&p3, "select * from person_test where 2<1 AND Id=:Id", params) + if p4 != nil { + t.Error("SelectOne should not have changed a nil receiver when no rows were found") + } + + // verify that the error is set to sql.ErrNoRows if not found + err = dbmap.SelectOne(&p2, "select * from person_test where Id=:Id", map[string]interface{}{ + "Id": -2222, + }) + if err == nil || err != sql.ErrNoRows { + t.Error("SelectOne should have returned an sql.ErrNoRows") + } + + _insert(dbmap, &Person{0, 0, 0, "bob", "smith", 0}) + err = dbmap.SelectOne(&p2, "select * from person_test where Fname='bob'") + if err == nil { + t.Error("Expected error when two rows found") + } + + // tests for #150 + var tInt int64 + var tStr string + var tBool bool + var tFloat float64 + primVals := []interface{}{tInt, tStr, tBool, tFloat} + for _, prim := range primVals { + err = dbmap.SelectOne(&prim, "select * from person_test where Id=-123") + if err == nil || err != sql.ErrNoRows { + t.Error("primVals: SelectOne should have returned sql.ErrNoRows") + } + } +} + +func TestSelectAlias(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + + p1 := &IdCreatedExternal{IdCreated: IdCreated{Id: 1, Created: 3}, External: 2} + + // Insert using embedded IdCreated, which reflects the structure of the table + _insert(dbmap, &p1.IdCreated) + + // Select into IdCreatedExternal type, which includes some fields not present + // in id_created_test + var p2 IdCreatedExternal + err := dbmap.SelectOne(&p2, "select * from id_created_test where Id=1") + if err != nil { + t.Error(err) + } + if p2.Id != 1 || p2.Created != 3 || p2.External != 0 { + t.Error("Expected ignored field defaults to not set") + } + + // Prove that we can supply an aliased value in the select, and that it will + // automatically map to 
IdCreatedExternal.External + err = dbmap.SelectOne(&p2, "SELECT *, 1 AS external FROM id_created_test") + if err != nil { + t.Error(err) + } + if p2.External != 1 { + t.Error("Expected select as can map to exported field.") + } + + var rows *sql.Rows + var cols []string + rows, err = dbmap.Db.Query("SELECT * FROM id_created_test") + cols, err = rows.Columns() + if err != nil || len(cols) != 2 { + t.Error("Expected ignored column not created") + } +} + +func TestMysqlPanicIfDialectNotInitialized(t *testing.T) { + _, driver := dialectAndDriver() + // this test only applies to MySQL + if os.Getenv("GORP_TEST_DIALECT") != "mysql" { + return + } + + // The expected behaviour is to catch a panic. + // Here is the deferred function which will check if a panic has indeed occurred : + defer func() { + r := recover() + if r == nil { + t.Error("db.CreateTables() should panic if db is initialized with an incorrect MySQLDialect") + } + }() + + // invalid MySQLDialect : does not contain Engine or Encoding specification + dialect := MySQLDialect{} + db := &DbMap{Db: connect(driver), Dialect: dialect} + db.AddTableWithName(Invoice{}, "invoice") + // the following call should panic : + db.CreateTables() +} + +func TestSingleColumnKeyDbReturnsZeroRowsUpdatedOnPKChange(t *testing.T) { + dbmap := initDbMap() + defer dropAndClose(dbmap) + dbmap.AddTableWithName(SingleColumnTable{}, "single_column_table").SetKeys(false, "SomeId") + err := dbmap.DropTablesIfExists() + if err != nil { + t.Error("Drop tables failed") + } + err = dbmap.CreateTablesIfNotExists() + if err != nil { + t.Error("Create tables failed") + } + err = dbmap.TruncateTables() + if err != nil { + t.Error("Truncate tables failed") + } + + sct := SingleColumnTable{ + SomeId: "A Unique Id String", + } + + count, err := dbmap.Update(&sct) + if err != nil { + t.Error(err) + } + if count != 0 { + t.Errorf("Expected 0 updated rows, got %d", count) + } + +} + +func TestPrepare(t *testing.T) { + dbmap := initDbMap() + defer 
dropAndClose(dbmap) + + inv1 := &Invoice{0, 100, 200, "prepare-foo", 0, false} + inv2 := &Invoice{0, 100, 200, "prepare-bar", 0, false} + _insert(dbmap, inv1, inv2) + + bindVar0 := dbmap.Dialect.BindVar(0) + bindVar1 := dbmap.Dialect.BindVar(1) + stmt, err := dbmap.Prepare(fmt.Sprintf("UPDATE invoice_test SET Memo=%s WHERE Id=%s", bindVar0, bindVar1)) + if err != nil { + t.Error(err) + } + defer stmt.Close() + _, err = stmt.Exec("prepare-baz", inv1.Id) + if err != nil { + t.Error(err) + } + err = dbmap.SelectOne(inv1, "SELECT * from invoice_test WHERE Memo='prepare-baz'") + if err != nil { + t.Error(err) + } + + trans, err := dbmap.Begin() + if err != nil { + t.Error(err) + } + transStmt, err := trans.Prepare(fmt.Sprintf("UPDATE invoice_test SET IsPaid=%s WHERE Id=%s", bindVar0, bindVar1)) + if err != nil { + t.Error(err) + } + defer transStmt.Close() + _, err = transStmt.Exec(true, inv2.Id) + if err != nil { + t.Error(err) + } + err = dbmap.SelectOne(inv2, fmt.Sprintf("SELECT * from invoice_test WHERE IsPaid=%s", bindVar0), true) + if err == nil || err != sql.ErrNoRows { + t.Error("SelectOne should have returned an sql.ErrNoRows") + } + err = trans.SelectOne(inv2, fmt.Sprintf("SELECT * from invoice_test WHERE IsPaid=%s", bindVar0), true) + if err != nil { + t.Error(err) + } + err = trans.Commit() + if err != nil { + t.Error(err) + } + err = dbmap.SelectOne(inv2, fmt.Sprintf("SELECT * from invoice_test WHERE IsPaid=%s", bindVar0), true) + if err != nil { + t.Error(err) + } +} + +func BenchmarkNativeCrud(b *testing.B) { + b.StopTimer() + dbmap := initDbMapBench() + defer dropAndClose(dbmap) + b.StartTimer() + + insert := "insert into invoice_test (Created, Updated, Memo, PersonId) values (?, ?, ?, ?)" + sel := "select Id, Created, Updated, Memo, PersonId from invoice_test where Id=?" + update := "update invoice_test set Created=?, Updated=?, Memo=?, PersonId=? where Id=?" + delete := "delete from invoice_test where Id=?" 
+ + inv := &Invoice{0, 100, 200, "my memo", 0, false} + + for i := 0; i < b.N; i++ { + res, err := dbmap.Db.Exec(insert, inv.Created, inv.Updated, + inv.Memo, inv.PersonId) + if err != nil { + panic(err) + } + + newid, err := res.LastInsertId() + if err != nil { + panic(err) + } + inv.Id = newid + + row := dbmap.Db.QueryRow(sel, inv.Id) + err = row.Scan(&inv.Id, &inv.Created, &inv.Updated, &inv.Memo, + &inv.PersonId) + if err != nil { + panic(err) + } + + inv.Created = 1000 + inv.Updated = 2000 + inv.Memo = "my memo 2" + inv.PersonId = 3000 + + _, err = dbmap.Db.Exec(update, inv.Created, inv.Updated, inv.Memo, + inv.PersonId, inv.Id) + if err != nil { + panic(err) + } + + _, err = dbmap.Db.Exec(delete, inv.Id) + if err != nil { + panic(err) + } + } + +} + +func BenchmarkGorpCrud(b *testing.B) { + b.StopTimer() + dbmap := initDbMapBench() + defer dropAndClose(dbmap) + b.StartTimer() + + inv := &Invoice{0, 100, 200, "my memo", 0, true} + for i := 0; i < b.N; i++ { + err := dbmap.Insert(inv) + if err != nil { + panic(err) + } + + obj, err := dbmap.Get(Invoice{}, inv.Id) + if err != nil { + panic(err) + } + + inv2, ok := obj.(*Invoice) + if !ok { + panic(fmt.Sprintf("expected *Invoice, got: %v", obj)) + } + + inv2.Created = 1000 + inv2.Updated = 2000 + inv2.Memo = "my memo 2" + inv2.PersonId = 3000 + _, err = dbmap.Update(inv2) + if err != nil { + panic(err) + } + + _, err = dbmap.Delete(inv2) + if err != nil { + panic(err) + } + + } +} + +func initDbMapBench() *DbMap { + dbmap := newDbMap() + dbmap.Db.Exec("drop table if exists invoice_test") + dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") + err := dbmap.CreateTables() + if err != nil { + panic(err) + } + return dbmap +} + +func initDbMap() *DbMap { + dbmap := newDbMap() + dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") + dbmap.AddTableWithName(InvoiceTag{}, "invoice_tag_test").SetKeys(true, "myid") + dbmap.AddTableWithName(AliasTransientField{}, 
"alias_trans_field_test").SetKeys(true, "id") + dbmap.AddTableWithName(OverriddenInvoice{}, "invoice_override_test").SetKeys(false, "Id") + dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id").SetVersionCol("Version") + dbmap.AddTableWithName(WithIgnoredColumn{}, "ignored_column_test").SetKeys(true, "Id") + dbmap.AddTableWithName(IdCreated{}, "id_created_test").SetKeys(true, "Id") + dbmap.AddTableWithName(TypeConversionExample{}, "type_conv_test").SetKeys(true, "Id") + dbmap.AddTableWithName(WithEmbeddedStruct{}, "embedded_struct_test").SetKeys(true, "Id") + dbmap.AddTableWithName(WithEmbeddedStructBeforeAutoincrField{}, "embedded_struct_before_autoincr_test").SetKeys(true, "Id") + dbmap.AddTableWithName(WithEmbeddedAutoincr{}, "embedded_autoincr_test").SetKeys(true, "Id") + dbmap.AddTableWithName(WithTime{}, "time_test").SetKeys(true, "Id") + dbmap.AddTableWithName(WithNullTime{}, "nulltime_test").SetKeys(false, "Id") + dbmap.TypeConverter = testTypeConverter{} + err := dbmap.DropTablesIfExists() + if err != nil { + panic(err) + } + err = dbmap.CreateTables() + if err != nil { + panic(err) + } + + // See #146 and TestSelectAlias - this type is mapped to the same + // table as IdCreated, but includes an extra field that isn't in the table + dbmap.AddTableWithName(IdCreatedExternal{}, "id_created_test").SetKeys(true, "Id") + + return dbmap +} + +func initDbMapNulls() *DbMap { + dbmap := newDbMap() + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + dbmap.AddTable(TableWithNull{}).SetKeys(false, "Id") + err := dbmap.CreateTables() + if err != nil { + panic(err) + } + return dbmap +} + +func newDbMap() *DbMap { + dialect, driver := dialectAndDriver() + dbmap := &DbMap{Db: connect(driver), Dialect: dialect} + dbmap.TraceOn("", log.New(os.Stdout, "gorptest: ", log.Lmicroseconds)) + return dbmap +} + +func dropAndClose(dbmap *DbMap) { + dbmap.DropTablesIfExists() + dbmap.Db.Close() +} + +func connect(driver string) *sql.DB { + dsn := 
os.Getenv("GORP_TEST_DSN") + if dsn == "" { + panic("GORP_TEST_DSN env variable is not set. Please see README.md") + } + + db, err := sql.Open(driver, dsn) + if err != nil { + panic("Error connecting to db: " + err.Error()) + } + return db +} + +func dialectAndDriver() (Dialect, string) { + switch os.Getenv("GORP_TEST_DIALECT") { + case "mysql": + return MySQLDialect{"InnoDB", "UTF8"}, "mymysql" + case "gomysql": + return MySQLDialect{"InnoDB", "UTF8"}, "mysql" + case "postgres": + return PostgresDialect{}, "postgres" + case "sqlite": + return SqliteDialect{}, "sqlite3" + } + panic("GORP_TEST_DIALECT env variable is not set or is invalid. Please see README.md") +} + +func _insert(dbmap *DbMap, list ...interface{}) { + err := dbmap.Insert(list...) + if err != nil { + panic(err) + } +} + +func _update(dbmap *DbMap, list ...interface{}) int64 { + count, err := dbmap.Update(list...) + if err != nil { + panic(err) + } + return count +} + +func _del(dbmap *DbMap, list ...interface{}) int64 { + count, err := dbmap.Delete(list...) + if err != nil { + panic(err) + } + + return count +} + +func _get(dbmap *DbMap, i interface{}, keys ...interface{}) interface{} { + obj, err := dbmap.Get(i, keys...) + if err != nil { + panic(err) + } + + return obj +} + +func selectInt(dbmap *DbMap, query string, args ...interface{}) int64 { + i64, err := SelectInt(dbmap, query, args...) + if err != nil { + panic(err) + } + + return i64 +} + +func selectNullInt(dbmap *DbMap, query string, args ...interface{}) sql.NullInt64 { + i64, err := SelectNullInt(dbmap, query, args...) + if err != nil { + panic(err) + } + + return i64 +} + +func selectFloat(dbmap *DbMap, query string, args ...interface{}) float64 { + f64, err := SelectFloat(dbmap, query, args...) + if err != nil { + panic(err) + } + + return f64 +} + +func selectNullFloat(dbmap *DbMap, query string, args ...interface{}) sql.NullFloat64 { + f64, err := SelectNullFloat(dbmap, query, args...) 
+ if err != nil { + panic(err) + } + + return f64 +} + +func selectStr(dbmap *DbMap, query string, args ...interface{}) string { + s, err := SelectStr(dbmap, query, args...) + if err != nil { + panic(err) + } + + return s +} + +func selectNullStr(dbmap *DbMap, query string, args ...interface{}) sql.NullString { + s, err := SelectNullStr(dbmap, query, args...) + if err != nil { + panic(err) + } + + return s +} + +func _rawexec(dbmap *DbMap, query string, args ...interface{}) sql.Result { + res, err := dbmap.Exec(query, args...) + if err != nil { + panic(err) + } + return res +} + +func _rawselect(dbmap *DbMap, i interface{}, query string, args ...interface{}) []interface{} { + list, err := dbmap.Select(i, query, args...) + if err != nil { + panic(err) + } + return list +} diff --git a/Godeps/_workspace/src/github.com/go-gorp/gorp/test_all.sh b/Godeps/_workspace/src/github.com/go-gorp/gorp/test_all.sh new file mode 100644 index 000000000..f870b39a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-gorp/gorp/test_all.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# on macs, you may need to: +# export GOBUILDFLAG=-ldflags -linkmode=external + +set -e + +export GORP_TEST_DSN=gorptest/gorptest/gorptest +export GORP_TEST_DIALECT=mysql +go test $GOBUILDFLAG . + +export GORP_TEST_DSN=gorptest:gorptest@/gorptest +export GORP_TEST_DIALECT=gomysql +go test $GOBUILDFLAG . + +export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable" +export GORP_TEST_DIALECT=postgres +go test $GOBUILDFLAG . + +export GORP_TEST_DSN=/tmp/gorptest.bin +export GORP_TEST_DIALECT=sqlite +go test $GOBUILDFLAG . diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 000000000..ba8e0cb3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? 
+ehthumbs.db +Thumbs.db diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 000000000..2f4e3c2f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - tip + +before_script: + - mysql -e 'create database gotest;' diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 000000000..4b65bf363 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,42 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Arne Hormann +Carlos Nieto +Chris Moos +DisposaBoy +Frederick Mayle +Gustavo Kristic +Hanno Braun +Henri Yandell +INADA Naoki +James Harr +Jian Zhen +Julien Schmidt +Kamil Dziedzic +Leonardo YongUk Kim +Lucas Liu +Luke Scott +Michael Woolnough +Nicola Peduzzi +Runrioter Wung +Soroush Pour +Xiaobing Jiang +Xiuming Chen + +# Organizations + +Barracuda Networks, Inc. +Google Inc. +Stripe Inc. 
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 000000000..161ad0fcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,92 @@ +## HEAD + +Changes: + + - Go 1.1 is no longer supported + - Use decimals field from MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + +Bugfixes: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + +New Features: + - Support for returning table alias on Columns() (#289) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. 
This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. 
Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 000000000..f87c19824 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +Please provide the following minimum information: +* Your Go-MySQL-Driver version (or git SHA) +* Your Go version (run `go version` in your console) +* A detailed issue description +* Error Log if present +* If possible, a short example + + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Pull Requests Checklist + +Please check the following points before submitting your pull request: +- [x] Code compiles correctly +- [x] Created tests, if possible +- [x] All tests pass +- [x] Extended the README / documentation, if necessary +- [x] Added yourself to the AUTHORS file + +### Code Review + +Everyone is invited to review and comment on pull requests. 
+If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 000000000..14e2f777f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 000000000..565614eef --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build appengine + +package mysql + +import ( + "appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 000000000..fb8a2f5f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,246 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + if w, ok := err.(MySQLWarnings); ok { + b.Logf("Warning on %q: %v", query, w) + } else { + b.Fatalf("Error on %q: %v", query, err) + } + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer 
stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT ?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + 
b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkInterpolation(b *testing.B) { + mc := &mysqlConn{ + cfg: &config{ + interpolateParams: true, + loc: time.UTC, + }, + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + buf: newBuffer(nil), + } + + args := []driver.Value{ + int64(42424242), + float64(math.Pi), + false, + time.Unix(1423411542, 807015000), + []byte("bytes containing special chars ' \" \a \x00"), + "string containing special chars ' \" \a \x00", + } + q := "SELECT ?, ?, ?, ?, ?, ?" + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := mc.interpolateParams(q, args) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 000000000..509ce89e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,136 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import "io" + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. 
+type buffer struct { + buf []byte + rd io.Reader + idx int + length int +} + +func newBuffer(rd io.Reader) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + rd: rd, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? + if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + nn, err := b.rd.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. 
+func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 000000000..6c1d613d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,250 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation byte = 33 // utf8_general_ci + +// A list of available collations mapped to the internal ID. 
+// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, 
+ "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + "ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + 
"utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. 
+var unsafeCollations = map[byte]bool{ + 1: true, // big5_chinese_ci + 13: true, // sjis_japanese_ci + 28: true, // gbk_chinese_ci + 84: true, // big5_bin + 86: true, // gb2312_bin + 87: true, // gbk_bin + 88: true, // sjis_bin + 95: true, // cp932_japanese_ci + 96: true, // cp932_bin +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 000000000..a6d39bec9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,402 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql/driver" + "errors" + "net" + "strconv" + "strings" + "time" +) + +type mysqlConn struct { + buf buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *config + maxPacketAllowed int + maxWriteSize int + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + strict bool +} + +type config struct { + user string + passwd string + net string + addr string + dbname string + params map[string]string + loc *time.Location + tls *tls.Config + timeout time.Duration + collation uint8 + allowAllFiles bool + allowOldPasswords bool + clientFoundRows bool + columnsWithAlias bool + interpolateParams bool +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + 
charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // time.Time parsing + case "parseTime": + var isBool bool + mc.parseTime, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Strict mode + case "strict": + var isBool bool + mc.strict, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Compression + case "compress": + err = errors.New("Compression not implemented yet") + return + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + err := mc.exec("START TRANSACTION") + if err == nil { + return &mysqlTx{mc}, err + } + + return nil, err +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if mc.netConn != nil { + err = mc.writeCommandPacket(comQuit) + if err == nil { + err = mc.netConn.Close() + } else { + mc.netConn.Close() + } + mc.netConn = nil + } + + mc.cfg = nil + mc.buf.rd = nil + + return +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, err + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + buf := mc.buf.takeCompleteBuffer() + if buf == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return "", driver.ErrBadConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) 
+ } else { + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxPacketAllowed { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, err +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err != nil { + return err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil && resLen > 0 { + if err = mc.readUntilEOF(); err != nil { + return err + } + + err = mc.readUntilEOF() + } + + return err +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + 
// try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + // no columns, no more data + return emptyRows{}, nil + } + // Columns + rows.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, err +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 000000000..dddc12908 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,162 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + minProtocolVersion byte = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +const ( + fieldTypeDecimal byte = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeNewDecimal byte = iota + 0xf6 + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + 
fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 000000000..3cbbe6031 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,140 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" +) + +// This struct is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. 
+type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +type DialFunc func(addr string) (net.Conn, error) + +var dials map[string]DialFunc + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +func RegisterDial(net string, dial DialFunc) { + if dials == nil { + dials = make(map[string]DialFunc) + } + dials[net] = dial +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + } + mc.cfg, err = parseDSN(dsn) + if err != nil { + return nil, err + } + + // Connect to Server + if dial, ok := dials[mc.cfg.net]; ok { + mc.netConn, err = dial(mc.cfg.addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.timeout} + mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr) + } + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. 
+ mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + mc.buf = newBuffer(mc.netConn) + + // Reading Handshake Initialization Packet + cipher, err := mc.readInitPacket() + if err != nil { + mc.Close() + return nil, err + } + + // Send Client Authentication Packet + if err = mc.writeAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + + // Read Result Packet + err = mc.readResultOK() + if err != nil { + // Retry with old authentication method, if allowed + if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword { + if err = mc.writeOldAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + if err = mc.readResultOK(); err != nil { + mc.Close() + return nil, err + } + } else { + mc.Close() + return nil, err + } + + } + + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxPacketAllowed = stringToInt(maxap) - 1 + if mc.maxPacketAllowed < maxPacketSize { + mc.maxWriteSize = mc.maxPacketAllowed + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 000000000..cb0d5f5ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1657 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + user string + pass string + prot string + addr string + dbname string + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + // get environment variables + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user = env("MYSQL_TEST_USER", "root") + pass = env("MYSQL_TEST_PASS", "") + prot = env("MYSQL_TEST_PROT", "tcp") + addr = env("MYSQL_TEST_ADDR", "localhost:3306") + dbname = env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dsn2 := dsn + "&interpolateParams=true" + var db2 *sql.DB + if _, err := parseDSN(dsn2); err != errInvalidDSNUnsafeCollation { + db2, err = sql.Open("mysql", dsn2) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db2.Close() + } + + dbt := &DBTest{t, db} + dbt2 := &DBTest{t, db2} + for _, test := range tests { 
+ test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + if db2 != nil { + test(dbt2) + dbt2.db.Exec("DROP TABLE IF EXISTS test") + } + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("Error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) + if err != nil { + dbt.fail("Exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("Query", query, err) + } + return rows +} + +func TestEmptyQuery(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // just a comment, no query + rows := dbt.mustQuery("--") + // will hang before #255 + if rows.Next() { + dbt.Errorf("Next on rows must be false") + } + }) +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("Expected InsertID 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + 
res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("Expected 0 affected row, got %d", count) + } + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", 
v, in, out) + } + } else { + dbt.Errorf("%s ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s != %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. 
At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." + dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +type timeTests struct { + dbtype string + tlayout string + tests []timeTest +} + +type timeTest struct { + s string // leading "!": do not use t as value in queries + t time.Time +} + +type timeMode byte + +func (t timeMode) String() string { + switch t { + case binaryString: + return "binary:string" + case binaryTime: + return "binary:time.Time" + case textString: + return "text:string" + } + panic("unsupported timeMode") +} + +func (t timeMode) Binary() bool { + switch t { + case binaryString, binaryTime: + return true + } + return false +} + +const ( + binaryString timeMode = iota + binaryTime + textString +) + +func (t timeTest) genQuery(dbtype string, mode timeMode) string { + var inner string + if mode.Binary() { + inner = "?" 
+ } else { + inner = `"%s"` + } + return `SELECT cast(` + inner + ` as ` + dbtype + `)` +} + +func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { + var rows *sql.Rows + query := t.genQuery(dbtype, mode) + switch mode { + case binaryString: + rows = dbt.mustQuery(query, t.s) + case binaryTime: + rows = dbt.mustQuery(query, t.t) + case textString: + query = fmt.Sprintf(query, t.s) + rows = dbt.mustQuery(query) + default: + panic("unsupported mode") + } + defer rows.Close() + var err error + if !rows.Next() { + err = rows.Err() + if err == nil { + err = fmt.Errorf("no data") + } + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + var dst interface{} + err = rows.Scan(&dst) + if err != nil { + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + switch val := dst.(type) { + case []uint8: + str := string(val) + if str == t.s { + return + } + if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { + // a fix mainly for TravisCI: + // accept full microsecond resolution in result for DATETIME columns + // where the binary protocol was used + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, str, + ) + case time.Time: + if val == t.t { + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, val.Format(tlayout), + ) + default: + fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) + dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", + dbtype, mode, + val, val, + ) + } +} + +func TestDateTime(t *testing.T) { + afterTime := func(t time.Time, d string) time.Time { + dur, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return t.Add(dur) + } + // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests + format := "2006-01-02 15:04:05.999999" + t0 := time.Time{} + tstr0 := "0000-00-00 00:00:00.000000" + testcases := []timeTests{ + {"DATE", format[:10], []timeTest{ + {t: time.Date(2011, 11, 20, 
0, 0, 0, 0, time.UTC)}, + {t: t0, s: tstr0[:10]}, + }}, + {"DATETIME", format[:19], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(0)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(1)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, + {t: t0, s: tstr0[:21]}, + }}, + {"DATETIME(6)", format, []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)}, + {t: t0, s: tstr0}, + }}, + {"TIME", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(0)", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(1)", format[11:21], []timeTest{ + {t: afterTime(t0, "12345600ms")}, + {s: "!-12:34:56.7"}, + {s: "!-838:59:58.9"}, + {s: "!838:59:58.9"}, + {t: t0, s: tstr0[11:21]}, + }}, + {"TIME(6)", format[11:], []timeTest{ + {t: afterTime(t0, "1234567890123000ns")}, + {s: "!-12:34:56.789012"}, + {s: "!-838:59:58.999999"}, + {s: "!838:59:58.999999"}, + {t: t0, s: tstr0[11:]}, + }}, + } + dsns := []string{ + dsn + "&parseTime=true", + dsn + "&parseTime=false", + } + for _, testdsn := range dsns { + runTests(t, testdsn, func(dbt *DBTest) { + microsecsSupported := false + zeroDateSupported := false + var rows *sql.Rows + var err error + rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`) + if err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`) + if err == nil { + rows.Scan(&zeroDateSupported) + rows.Close() + } + for _, setups := range testcases { + if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" { + // skip fractional second tests 
if unsupported by server + continue + } + for _, setup := range setups.tests { + allowBinTime := true + if setup.s == "" { + // fill time string whereever Go can reliable produce it + setup.s = setup.t.Format(setups.tlayout) + } else if setup.s[0] == '!' { + // skip tests using setup.t as source in queries + allowBinTime = false + // fix setup.s - remove the "!" + setup.s = setup.s[1:] + } + if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] { + // skip disallowed 0000-00-00 date + continue + } + setup.run(dbt, setups.dbtype, setups.tlayout, textString) + setup.run(dbt, setups.dbtype, setups.tlayout, binaryString) + if allowBinTime { + setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime) + } + } + } + }) + } +} + +func TestTimestampMicros(t *testing.T) { + format := "2006-01-02 15:04:05.999999" + f0 := format[:19] + f1 := format[:21] + f6 := format[:26] + runTests(t, dsn, func(dbt *DBTest) { + // check if microseconds are supported. + // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width + // and not precision. 
+ // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html + microsecsSupported := false + if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + if !microsecsSupported { + // skip test + return + } + _, err := dbt.db.Exec(` + CREATE TABLE test ( + value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `', + value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `', + value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `' + )`, + ) + if err != nil { + dbt.Error(err) + } + defer dbt.mustExec("DROP TABLE IF EXISTS test") + dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6) + var res0, res1, res6 string + rows := dbt.mustQuery("SELECT * FROM test") + if !rows.Next() { + dbt.Errorf("test contained no selectable values") + } + err = rows.Scan(&res0, &res1, &res6) + if err != nil { + dbt.Error(err) + } + if res0 != f0 { + dbt.Errorf("expected %q, got %q", f0, res0) + } + if res1 != f1 { + dbt.Errorf("expected %q, got %q", f1, res1) + } + if res6 != f6 { + dbt.Errorf("expected %q, got %q", f6, res6) + } + }) +} + +func TestNULL(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + nullStmt, err := dbt.db.Prepare("SELECT NULL") + if err != nil { + dbt.Fatal(err) + } + defer nullStmt.Close() + + nonNullStmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + defer nonNullStmt.Close() + + // NullBool + var nb sql.NullBool + // Invalid + if err = nullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if nb.Valid { + dbt.Error("Valid NullBool which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if !nb.Valid { + dbt.Error("Invalid NullBool which should be valid") + } else if nb.Bool != true { + dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) + } + + // NullFloat64 + var nf sql.NullFloat64 + // Invalid + if err = 
nullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if nf.Valid { + dbt.Error("Valid NullFloat64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if !nf.Valid { + dbt.Error("Invalid NullFloat64 which should be valid") + } else if nf.Float64 != float64(1) { + dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) + } + + // NullInt64 + var ni sql.NullInt64 + // Invalid + if err = nullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if ni.Valid { + dbt.Error("Valid NullInt64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if !ni.Valid { + dbt.Error("Invalid NullInt64 which should be valid") + } else if ni.Int64 != int64(1) { + dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64) + } + + // NullString + var ns sql.NullString + // Invalid + if err = nullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if ns.Valid { + dbt.Error("Valid NullString which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if !ns.Valid { + dbt.Error("Invalid NullString which should be valid") + } else if ns.String != `1` { + dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)") + } + + // nil-bytes + var b []byte + // Read nil + if err = nullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil []byte wich should be nil") + } + // Read non-nil + if err = nonNullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("Nil []byte wich should be non-nil") + } + // Insert nil + b = nil + success := false + if err = dbt.db.QueryRow("SELECT ? 
IS NULL", b).Scan(&success); err != nil { + dbt.Fatal(err) + } + if !success { + dbt.Error("Inserting []byte(nil) as NULL failed") + } + // Check input==output with input==nil + b = nil + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil echo from nil input") + } + // Check input==output with input!=nil + b = []byte("") + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil echo from non-nil input") + } + + // Insert NULL + dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)") + + dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) + + var out interface{} + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + rows.Scan(&out) + if out != nil { + dbt.Errorf("%v != nil", out) + } + } else { + dbt.Error("no data") + } + }) +} + +func TestUint64(t *testing.T) { + const ( + u0 = uint64(0) + uall = ^u0 + uhigh = uall >> 1 + utop = ^uhigh + s0 = int64(0) + sall = ^s0 + shigh = int64(uhigh) + stop = ^shigh + ) + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? 
,?, ?, ?, ?, ?`) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + row := stmt.QueryRow( + u0, uhigh, utop, uall, + s0, shigh, stop, sall, + ) + + var ua, ub, uc, ud uint64 + var sa, sb, sc, sd int64 + + err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd) + if err != nil { + dbt.Fatal(err) + } + switch { + case ua != u0, + ub != uhigh, + uc != utop, + ud != uall, + sa != s0, + sb != shigh, + sc != stop, + sd != sall: + dbt.Fatal("Unexpected result value") + } + }) +} + +func TestLongData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var maxAllowedPacketSize int + err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) + if err != nil { + dbt.Fatal(err) + } + maxAllowedPacketSize-- + + // don't get too ambitious + if maxAllowedPacketSize > 1<<25 { + maxAllowedPacketSize = 1 << 25 + } + + dbt.mustExec("CREATE TABLE test (value LONGBLOB)") + + in := strings.Repeat(`a`, maxAllowedPacketSize+1) + var out string + var rows *sql.Rows + + // Long text data + const nonDataQueryLen = 28 // length query w/o value + inS := in[:maxAllowedPacketSize-nonDataQueryLen] + dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if inS != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + dbt.Fatalf("LONGBLOB: no data") + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Long binary data + dbt.mustExec("INSERT INTO test VALUES(?)", in) + rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + if err = rows.Err(); err != nil { + dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) + } else { + dbt.Fatal("LONGBLOB: 
no data (err: )") + } + } + }) +} + +func TestLoadData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + verifyLoadDataResult := func() { + rows, err := dbt.db.Query("SELECT * FROM test") + if err != nil { + dbt.Fatal(err.Error()) + } + + i := 0 + values := [4]string{ + "a string", + "a string containing a \t", + "a string containing a \n", + "a string containing both \t\n", + } + + var id int + var value string + + for rows.Next() { + i++ + err = rows.Scan(&id, &value) + if err != nil { + dbt.Fatal(err.Error()) + } + if i != id { + dbt.Fatalf("%d != %d", i, id) + } + if values[i-1] != value { + dbt.Fatalf("%q != %q", values[i-1], value) + } + } + err = rows.Err() + if err != nil { + dbt.Fatal(err.Error()) + } + + if i != 4 { + dbt.Fatalf("Rows count mismatch. Got %d, want 4", i) + } + } + file, err := ioutil.TempFile("", "gotest") + defer os.Remove(file.Name()) + if err != nil { + dbt.Fatal(err) + } + file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") + file.Close() + + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") + + // Local File + RegisterLocalFile(file.Name()) + dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name())) + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent file didn't fail") + } else if err.Error() != "Local File 'doesnotexist' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files" { + dbt.Fatal(err.Error()) + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Reader + RegisterReaderHandler("test", func() io.Reader { + file, err = os.Open(file.Name()) + if err != nil { + dbt.Fatal(err) + } + return file + }) + dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent Reader didn't fail") + } else if err.Error() != "Reader 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + }) +} + +func TestFoundRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + }) + runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 matched rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 
1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES" + // make sure the MySQL version is recent enough with a separate connection + // before running the test + conn, err := MySQLDriver{}.Open(relaxedDsn) + if conn != nil { + conn.Close() + } + if me, ok := err.(*MySQLError); ok && me.Number == 1231 { + // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES' + // => skip test, MySQL server version is too old + return + } + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != len(queries[idx].codes) { + dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, 
queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == ErrNoTLS { + dbt.Skip("Server does not support TLS") + } else { + dbt.Fatalf("Error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("No Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + md := &MySQLDriver{} + conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + stmt, err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("Error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("Error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("Error closing connection: %s", err.Error()) + } + + defer func() { + if err := 
recover(); err != nil { + t.Errorf("Panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("Unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("Error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("Expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... 
+ _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("Connection must not succeed without a valid charset") + } + }) +} + +func TestCollation(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + defaultCollation := "utf8_general_ci" + testCollations := []string{ + "", // do not set + defaultCollation, // driver default + "latin1_general_ci", + "binary", + "utf8_unicode_ci", + "cp1257_bin", + } + + for _, collation := range testCollations { + var expected, tdsn string + if collation != "" { + tdsn = dsn + "&collation=" + collation + expected = collation + } else { + tdsn = dsn + expected = defaultCollation + } + + runTests(t, tdsn, func(dbt *DBTest) { + var got string + if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { + dbt.Fatal(err) + } + + if got != expected { + dbt.Fatalf("Expected connection collation %s but got %s", expected, got) + } + }) + } +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got none") + } + var result sql.RawBytes + rows.Scan(&result) + if expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", reftime) + + // Retrieve time from DB + rows := dbt.mustQuery("SELECT 
ts FROM test") + if !rows.Next() { + dbt.Fatal("Didn't get any rows out") + } + + var dbTime time.Time + err := rows.Scan(&dbTime) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if reftime.Unix() != dbTime.Unix() { + dbt.Errorf("Times don't match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) + dbt.Errorf(" Now(UTC)=%v\n", dbTime) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("Unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("Getting row failed") + } else { + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err 
:= stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("1st rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("1st rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("2nd rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("Unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("2nd rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("Unexpected row on rows2") + } + err = rows2.Close() + if err != nil { + dbt.Fatal(err) + } + } + }) +} + +// Regression test for +// * more than 32 NULL parameters (issue 209) +// * more parameters than fit into the buffer (issue 201) +func TestPreparedManyCols(t *testing.T) { + const numParams = defaultBufSize + runTests(t, dsn, func(dbt *DBTest) { + query := "SELECT ?" 
+ strings.Repeat(",?", numParams-1) + stmt, err := dbt.db.Prepare(query) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + // create more parameters than fit into the buffer + // which will take nil-values + params := make([]interface{}, numParams) + rows, err := stmt.Query(params...) + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + }) +} + +func TestConcurrent(t *testing.T) { + if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { + t.Skip("MYSQL_TEST_CONCURRENT env var not set") + } + + runTests(t, dsn, func(dbt *DBTest) { + var max int + err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + dbt.Logf("Testing up to %d concurrent connections \r\n", max) + + var remaining, succeeded int32 = int32(max), 0 + + var wg sync.WaitGroup + wg.Add(max) + + var fatalError string + var once sync.Once + fatalf := func(s string, vals ...interface{}) { + once.Do(func() { + fatalError = fmt.Sprintf(s, vals...) 
+ }) + } + + for i := 0; i < max; i++ { + go func(id int) { + defer wg.Done() + + tx, err := dbt.db.Begin() + atomic.AddInt32(&remaining, -1) + + if err != nil { + if err.Error() != "Error 1040: Too many connections" { + fatalf("Error on Conn %d: %s", id, err.Error()) + } + return + } + + // keep the connection busy until all connections are open + for remaining > 0 { + if _, err = tx.Exec("DO 1"); err != nil { + fatalf("Error on Conn %d: %s", id, err.Error()) + return + } + } + + if err = tx.Commit(); err != nil { + fatalf("Error on Conn %d: %s", id, err.Error()) + return + } + + // everything went fine with this connection + atomic.AddInt32(&succeeded, 1) + }(i) + } + + // wait until all conections are open + wg.Wait() + + if fatalError != "" { + dbt.Fatal(fatalError) + } + + dbt.Logf("Reached %d concurrent connections\r\n", succeeded) + }) +} + +// Tests custom dial functions +func TestCustomDial(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + // our custom dial function which justs wraps net.Dial here + RegisterDial("mydial", func(addr string) (net.Conn, error) { + return net.Dial(prot, addr) + }) + + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname)) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + if _, err = db.Exec("DO 1"); err != nil { + t.Fatalf("Connection failed: %s", err.Error()) + } +} + +func TestSqlInjection(t *testing.T) { + createTest := func(arg string) func(dbt *DBTest) { + return func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v INTEGER)") + dbt.mustExec("INSERT INTO test VALUES (?)", 1) + + var v int + // NULL can't be equal to anything, the idea here is to inject query so it returns row + // This test verifies that escapeQuotes and escapeBackslash are working properly + err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v) + if err == sql.ErrNoRows { + return // 
success, sql injection failed + } else if err == nil { + dbt.Errorf("Sql injection successful with arg: %s", arg) + } else { + dbt.Errorf("Error running query with arg: %s; err: %s", arg, err.Error()) + } + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", + } + for _, testdsn := range dsns { + runTests(t, testdsn, createTest("1 OR 1=1")) + runTests(t, testdsn, createTest("' OR '1'='1")) + } +} + +// Test if inserted data is correctly retrieved after being escaped +func TestInsertRetrieveEscapedData(t *testing.T) { + testData := func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v VARCHAR(255))") + + // All sequences that are escaped by escapeQuotes and escapeBackslash + v := "foo \x00\n\r\x1a\"'\\" + dbt.mustExec("INSERT INTO test VALUES (?)", v) + + var out string + err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + + if out != v { + dbt.Errorf("%q != %q", out, v) + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", + } + for _, testdsn := range dsns { + runTests(t, testdsn, testData) + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 000000000..97d7b3996 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,129 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "errors" + "fmt" + "io" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. 
+var ( + ErrInvalidConn = errors.New("Invalid Connection") + ErrMalformPkt = errors.New("Malformed Packet") + ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS") + ErrOldPassword = errors.New("This server only supports the insecure old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+") + ErrPktSync = errors.New("Commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.") + ErrBusyBuffer = errors.New("Busy buffer") +) + +var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. 
+func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} + +// MySQLWarnings is an error type which represents a group of one or more MySQL +// warnings +type MySQLWarnings []MySQLWarning + +func (mws MySQLWarnings) Error() string { + var msg string + for i, warning := range mws { + if i > 0 { + msg += "\r\n" + } + msg += fmt.Sprintf( + "%s %s: %s", + warning.Level, + warning.Code, + warning.Message, + ) + } + return msg +} + +// MySQLWarning is an error type which represents a single MySQL warning. +// Warnings are returned in groups only. See MySQLWarnings +type MySQLWarning struct { + Level string + Code string + Message string +} + +func (mc *mysqlConn) getWarnings() (err error) { + rows, err := mc.Query("SHOW WARNINGS", nil) + if err != nil { + return + } + + var warnings = MySQLWarnings{} + var values = make([]driver.Value, 3) + + for { + err = rows.Next(values) + switch err { + case nil: + warning := MySQLWarning{} + + if raw, ok := values[0].([]byte); ok { + warning.Level = string(raw) + } else { + warning.Level = fmt.Sprintf("%s", values[0]) + } + if raw, ok := values[1].([]byte); ok { + warning.Code = string(raw) + } else { + warning.Code = fmt.Sprintf("%s", values[1]) + } + if raw, ok := values[2].([]byte); ok { + warning.Message = string(raw) + } else { + warning.Message = fmt.Sprintf("%s", values[0]) + } + + warnings = append(warnings, warning) + + case io.EOF: + return warnings + + default: + rows.Close() + return + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go new file mode 100644 index 000000000..96f9126d6 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go @@ -0,0 +1,42 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "log" + "testing" +) + +func TestErrorsSetLogger(t *testing.T) { + previous := errLog + defer func() { + errLog = previous + }() + + // set up logger + const expected = "prefix: test\n" + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + logger := log.New(buffer, "prefix: ", 0) + + // print + SetLogger(logger) + errLog.Print("test") + + // check result + if actual := buffer.String(); actual != expected { + t.Errorf("expected %q, got %q", expected, actual) + } +} + +func TestErrorsStrictIgnoreNotes(t *testing.T) { + runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { + dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") + }) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 000000000..121a04c71 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,162 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "fmt" + "io" + "os" + "strings" +) + +var ( + fileRegister map[string]bool + readerRegister map[string]func() io.Reader +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + delete(fileRegister, strings.Trim(filePath, `"`)) +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. 
+func DeregisterReaderHandler(name string) { + delete(readerRegister, name) +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + + if strings.HasPrefix(name, "Reader::") { // io.Reader + name = name[8:] + if handler, inMap := readerRegister[name]; inMap { + rdr = handler() + if rdr != nil { + data = make([]byte, 4+mc.maxWriteSize) + + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + if mc.cfg.allowAllFiles || fileRegister[name] { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize { + data = make([]byte, 4+fileSize) + } else if fileSize <= mc.maxPacketAllowed { + data = make([]byte, 4+mc.maxWriteSize) + } else { + err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed) + } + } + } + } else { + err = fmt.Errorf("Local File '%s' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files", name) + } + } + + // send content packets + if err == nil { + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } else { + mc.readPacket() + } + return err +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 000000000..290a3887a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1138 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var payload []byte + for { + // Read packet header + data, err := mc.buf.readNext(4) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // Packet Length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + if pktLen < 1 { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, driver.ErrBadConn + } + + // Check Packet Sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } else { + return nil, ErrPktSync + } + } + mc.sequence++ + + // Read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + isLastPacket := (pktLen < maxPacketSize) + + // Zero allocations for non-splitting packets + if isLastPacket && payload == nil { + return data, nil + } + + payload = append(payload, data...) 
+ + if isLastPacket { + return payload, nil + } + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxPacketAllowed { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(ErrMalformPkt) + } else { + errLog.Print(err) + } + return driver.ErrBadConn + } +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "Unsupported MySQL Protocol Version %d. 
Protocol Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + cipher = append(cipher, data[pos:pos+12]...) 
+ + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return ErrMalformPkt + return cipher, nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], cipher) + return b[:], nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + mc.flags&clientLongFlag + + if mc.cfg.clientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff) + + // To specify a db name + if n := len(mc.cfg.dbname); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + data[12] = mc.cfg.collation + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.rd = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + 23 + + // User [null terminated string] + if len(mc.cfg.user) > 0 { + pos += copy(data[pos:], mc.cfg.user) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.dbname) > 0 { + pos += copy(data[pos:], mc.cfg.dbname) + data[pos] = 0x00 + } + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User password + scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd)) + + // Calculate the packet lenght and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return mc.handleOkPacket(data) + + case iEOF: + // someone is using old_passwords + return ErrOldPassword + + default: // Error otherwise + return mc.handleErrorPacket(data) + } + } + return err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + 
+ // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = statusFlag(data[1+n+m]) | statusFlag(data[1+n+m+1])<<8 + + // warning count [2 bytes] + if !mc.strict { + return nil + } else { + pos := 1 + n + m + 2 + if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { + return mc.getWarnings() + } + return nil + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.columnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded 
string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [uint8] + // Charset [charset, collation uint8] + // Length [uint32] + pos += n + 1 + 2 + 4 + + // Field type [uint8] + columns[i].fieldType = data[pos] + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc = nil + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != 
nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + + // No Err and no EOF Packet + if err == nil && data[0] != iEOF { + continue + } + return err // Err or EOF + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + if !stmt.mc.strict { + return columnCount, nil + } else { + // Check for warnings count > 0, only available in MySQL > 4.1 + if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { + return columnCount, stmt.mc.getWarnings() + } + return columnCount, nil + } + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxPacketAllowed - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen 
:= len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "Arguments count mismatch (Got: %d Has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + 
uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = fieldTypeDouble + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + var val []byte + if v.IsZero() { + val = []byte("0000-00-00") + } else { + val = []byte(v.In(mc.cfg.loc).Format(timeFormat)) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(val)), + ) + paramValues = append(paramValues, val...) 
+ + default: + return fmt.Errorf("Can't convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + rows.mc = nil + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + return io.EOF + } + + // Error otherwise + return rows.mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if 
rows.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "MySQL protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) + default: + var dstlen uint8 + if 
rows.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "MySQL protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 000000000..c6438d034 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 000000000..9d97d6d4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,102 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" +) + +type mysqlField struct { + tableName string + name string + flags fieldFlag + fieldType byte + decimals byte +} + +type mysqlRows struct { + mc *mysqlConn + columns []mysqlField +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +type emptyRows struct{} + +func (rows *mysqlRows) Columns() []string { + columns := make([]string, len(rows.columns)) + if rows.mc.cfg.columnsWithAlias { + for i := range columns { + columns[i] = rows.columns[i].tableName + "." + rows.columns[i].name + } + } else { + for i := range columns { + columns[i] = rows.columns[i].name + } + } + return columns +} + +func (rows *mysqlRows) Close() error { + mc := rows.mc + if mc == nil { + return nil + } + if mc.netConn == nil { + return ErrInvalidConn + } + + // Remove unread packets from stream + err := mc.readUntilEOF() + rows.mc = nil + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows emptyRows) Columns() []string { + return nil +} + +func (rows emptyRows) Close() error { + return nil +} + +func (rows emptyRows) Next(dest []driver.Value) error { + return io.EOF +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 000000000..f9dae03fa 
--- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,149 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "reflect" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int + columns []mysqlField // cached from the first query +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + if resLen > 0 { + // Columns + err = mc.readUntilEOF() + if err != nil { + return nil, err + } + + // Rows + err = mc.readUntilEOF() + } + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil + } + } + + return nil, err +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := 
stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + rows.mc = mc + + if resLen > 0 { + // Columns + // If not cached, read them and cache them + if stmt.columns == nil { + rows.columns, err = mc.readColumns(resLen) + stmt.columns = rows.columns + } else { + rows.columns = stmt.columns + err = mc.readUntilEOF() + } + } + + return rows, err +} + +type converter struct{} + +func (converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } + return driver.DefaultParameterConverter.ConvertValue(rv.Elem().Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return fmt.Sprintf("%d", u64), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 000000000..33c749b35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 000000000..6693d2970 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,963 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "net/url" + "strings" + "time" +) + +var ( + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs + + errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset") +) + +func init() { + tlsConfigRegister = make(map[string]*tls.Config) +} + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. 
+// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("Key '%s' is reserved", key) + } + + tlsConfigRegister[key] = config + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + delete(tlsConfigRegister, key) +} + +// parseDSN parses the DSN string to a config +func parseDSN(dsn string) (cfg *config, err error) { + // New config with some default values + cfg = &config{ + loc: time.UTC, + collation: defaultCollation, + } + + // TODO: use strings.IndexByte when we can depend on Go 1.2 + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.passwd = dsn[k+1 : j] + break + } + } + cfg.user = dsn[:k] + + break + } + } + + // 
[protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.addr = dsn[k+1 : i-1] + break + } + } + cfg.net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' { + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.dbname = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if cfg.interpolateParams && unsafeCollations[cfg.collation] { + return nil, errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.net == "" { + cfg.net = "tcp" + } + + // Set default address if empty + if cfg.addr == "" { + switch cfg.net { + case "tcp": + cfg.addr = "127.0.0.1:3306" + case "unix": + cfg.addr = "/tmp/mysql.sock" + default: + return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") + } + + } + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.interpolateParams, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.allowAllFiles, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Use old authentication 
mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.allowOldPasswords, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.clientFoundRows, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Collation + case "collation": + collation, ok := collations[value] + if !ok { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + err = errors.New("unknown collation") + return + } + cfg.collation = collation + break + + case "columnsWithAlias": + var isBool bool + cfg.columnsWithAlias, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // Dial Timeout + case "timeout": + cfg.timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.tls = &tls.Config{} + } + } else { + if strings.ToLower(value) == "skip-verify" { + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else if tlsConfig, ok := tlsConfigRegister[value]; ok { + if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.addr) + if err == nil { + tlsConfig.ServerName = host + } + } + + cfg.tls = tlsConfig + } else { + return fmt.Errorf("Invalid value / unknown config name: %s", value) + } + } + + default: + // lazy init + if cfg.params == nil { + cfg.params = make(map[string]string) + } + + if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +// 
Returns the bool value of the input. +// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + 
var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. 
+func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("Invalid Time-String: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + 
int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + if justTime { + return zeroDateTime[11 : 11+length], nil + } + return zeroDateTime[:length], nil + } + var dst []byte // return value + var pt, p1, p2, p3 byte // current digit pair + var zOffs byte // offset of value in zeroDateTime + if justTime { + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + if src[1] != 0 { + hour := uint16(src[1])*24 + uint16(src[5]) + pt = byte(hour / 100) + p1 = byte(hour - 100*uint16(pt)) + dst = append(dst, digits01[pt]) + } else { + p1 = src[5] + } + zOffs = 11 + src = src[6:] + } else { + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += 
"TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt = byte(year / 100) + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + } + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + if length <= byte(len(dst)) { + return dst, nil + } + src = src[2:] + if len(src) == 0 { + return append(dst, zeroDateTime[19:zOffs+length]...), nil + } + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 = byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 = byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 = byte(microsecs) + switch decimals := zOffs + length - 20; decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ), nil + case 1: + return append(dst, '.', + digits10[p1], + ), nil + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ), nil + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ), nil + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ), nil + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], + ), nil + } +} + 
+/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + switch b[0] { + + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 
3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. +func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. 
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos += 1 + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos += 1 + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. 
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 000000000..adb8dcbd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,346 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "fmt" + "testing" + "time" +) + +var testDSNs = []struct { + in string + out string + loc *time.Location +}{ + {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:true interpolateParams:false}", time.UTC}, + {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{user:user passwd:password 
net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:30000000000 collation:224 allowAllFiles:true allowOldPasswords:true clientFoundRows:true columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.Local}, + {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"@/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false 
columnsWithAlias:false interpolateParams:false}", time.UTC}, +} + +func TestDSNParser(t *testing.T) { + var cfg *config + var err error + var res string + + for i, tst := range testDSNs { + cfg, err = parseDSN(tst.in) + if err != nil { + t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + res = fmt.Sprintf("%+v", cfg) + if res != fmt.Sprintf(tst.out, tst.loc) { + t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc)) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + "user:pass@tcp(1.2.3.4:3306)", // no trailing slash + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := parseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) + } + } +} + +func TestDSNWithCustomTLS(t *testing.T) { + baseDSN := "user:password@tcp(localhost:5555)/dbname?tls=" + tlsCfg := tls.Config{} + + RegisterTLSConfig("utils_test", &tlsCfg) + + // Custom TLS is missing + tst := baseDSN + "invalid_tls" + cfg, err := parseDSN(tst) + if err == nil { + t.Errorf("Invalid custom TLS in DSN (%s) but did not error. 
Got config: %#v", tst, cfg) + } + + tst = baseDSN + "utils_test" + + // Custom TLS with a server name + name := "foohost" + tlsCfg.ServerName = name + cfg, err = parseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("Did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) + } + + // Custom TLS without a server name + name = "localhost" + tlsCfg.ServerName = "" + cfg, err = parseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("Did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) + } + + DeregisterTLSConfig("utils_test") +} + +func TestDSNUnsafeCollation(t *testing.T) { + _, err := parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") + if err != errInvalidDSNUnsafeCollation { + t.Error("Expected %v, Got %v", errInvalidDSNUnsafeCollation, err) + } + + _, err = parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=gbk_chinese_ci") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=ascii_bin&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := parseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} + +func 
TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} + +func TestLengthEncodedInteger(t *testing.T) { + var integerTests = []struct { + num uint64 + encoded []byte + }{ + {0x0000000000000000, []byte{0x00}}, + {0x0000000000000012, []byte{0x12}}, + {0x00000000000000fa, []byte{0xfa}}, + {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, + {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, + {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, + {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, + {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}}, + {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, + {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, + {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, + {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + + for _, tst := range integerTests { + num, isNull, numLen := readLengthEncodedInteger(tst.encoded) + if 
isNull { + t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) + } + if num != tst.num { + t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) + } + if numLen != len(tst.encoded) { + t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) + } + encoded := appendLengthEncodedInteger(nil, num) + if !bytes.Equal(encoded, tst.encoded) { + t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) + } + } +} + +func TestOldPass(t *testing.T) { + scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} + vectors := []struct { + pass string + out string + }{ + {" pass", "47575c5a435b4251"}, + {"pass ", "47575c5a435b4251"}, + {"123\t456", "575c47505b5b5559"}, + {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, + } + for _, tuple := range vectors { + ours := scrambleOldPassword(scramble, []byte(tuple.pass)) + if tuple.out != fmt.Sprintf("%x", ours) { + t.Errorf("Failed old password %q", tuple.pass) + } + } +} + +func TestFormatBinaryDateTime(t *testing.T) { + rawDate := [11]byte{} + binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years + rawDate[2] = 12 // months + rawDate[3] = 30 // days + rawDate[4] = 15 // hours + rawDate[5] = 46 // minutes + rawDate[6] = 23 // seconds + binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds + expect := func(expected string, inlen, outlen uint8) { + actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) + bytes, ok := actual.([]byte) + if !ok { + t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) + } + if string(bytes) != expected { + t.Errorf( + "expected %q, got %q for length in %d, out %d", + bytes, actual, inlen, outlen, + ) + } + } + expect("0000-00-00", 0, 10) + expect("0000-00-00 00:00:00", 0, 19) + expect("1978-12-30", 4, 10) + expect("1978-12-30 15:46:23", 7, 19) + expect("1978-12-30 15:46:23.987654", 11, 26) +} + +func TestEscapeBackslash(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesBackslash([]byte{}, 
[]byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringBackslash([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\\0bar", "foo\x00bar") + expect("foo\\nbar", "foo\nbar") + expect("foo\\rbar", "foo\rbar") + expect("foo\\Zbar", "foo\x1abar") + expect("foo\\\"bar", "foo\"bar") + expect("foo\\\\bar", "foo\\bar") + expect("foo\\'bar", "foo'bar") +} + +func TestEscapeQuotes(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesQuotes([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringQuotes([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\x00bar", "foo\x00bar") // not affected + expect("foo\nbar", "foo\nbar") // not affected + expect("foo\rbar", "foo\rbar") // not affected + expect("foo\x1abar", "foo\x1abar") // not affected + expect("foo''bar", "foo'bar") // affected + expect("foo\"bar", "foo\"bar") // not affected +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go new file mode 100644 index 000000000..c0654f5d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt.go @@ -0,0 +1,74 @@ +package aws + +import ( + "time" +) + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other goamz packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. 
+ Min int // minimum number of retries; overrides Total +} + +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. +func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. +func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go new file mode 100644 index 000000000..8ba497715 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/attempt_test.go @@ -0,0 +1,58 @@ +package aws_test + +import ( + "time" + + "github.com/goamz/goamz/aws" + . 
"gopkg.in/check.v1" +) + +func (S) TestAttemptTiming(c *C) { + testAttempt := aws.AttemptStrategy{ + Total: 0.25e9, + Delay: 0.1e9, + } + want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9} + got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing + t0 := time.Now() + for a := testAttempt.Start(); a.Next(); { + got = append(got, time.Now().Sub(t0)) + } + got = append(got, time.Now().Sub(t0)) + c.Assert(got, HasLen, len(want)) + const margin = 0.01e9 + for i, got := range want { + lo := want[i] - margin + hi := want[i] + margin + if got < lo || got > hi { + c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds()) + } + } +} + +func (S) TestAttemptNextHasNext(c *C) { + a := aws.AttemptStrategy{}.Start() + c.Assert(a.Next(), Equals, true) + c.Assert(a.Next(), Equals, false) + + a = aws.AttemptStrategy{}.Start() + c.Assert(a.Next(), Equals, true) + c.Assert(a.HasNext(), Equals, false) + c.Assert(a.Next(), Equals, false) + + a = aws.AttemptStrategy{Total: 2e8}.Start() + c.Assert(a.Next(), Equals, true) + c.Assert(a.HasNext(), Equals, true) + time.Sleep(2e8) + c.Assert(a.HasNext(), Equals, true) + c.Assert(a.Next(), Equals, true) + c.Assert(a.Next(), Equals, false) + + a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start() + time.Sleep(1e8) + c.Assert(a.Next(), Equals, true) + c.Assert(a.HasNext(), Equals, true) + c.Assert(a.Next(), Equals, true) + c.Assert(a.HasNext(), Equals, false) + c.Assert(a.Next(), Equals, false) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go new file mode 100644 index 000000000..cec40be7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws.go @@ -0,0 +1,431 @@ +// +// goamz - Go packages to interact with the Amazon Web Services. +// +// https://wiki.ubuntu.com/goamz +// +// Copyright (c) 2011 Canonical Ltd. 
+// +// Written by Gustavo Niemeyer +// +package aws + +import ( + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "time" + + "github.com/vaughan0/go-ini" +) + +// Defines the valid signers +const ( + V2Signature = iota + V4Signature = iota + Route53Signature = iota +) + +// Defines the service endpoint and correct Signer implementation to use +// to sign requests for this endpoint +type ServiceInfo struct { + Endpoint string + Signer uint +} + +// Region defines the URLs where AWS services may be accessed. +// +// See http://goo.gl/d8BP1 for more details. +type Region struct { + Name string // the canonical name of this region. + EC2Endpoint string + S3Endpoint string + S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. + S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. + S3LowercaseBucket bool // true if the region requires bucket names to be lower case. + SDBEndpoint string + SESEndpoint string + SNSEndpoint string + SQSEndpoint string + IAMEndpoint string + ELBEndpoint string + DynamoDBEndpoint string + CloudWatchServicepoint ServiceInfo + AutoScalingEndpoint string + RDSEndpoint ServiceInfo + STSEndpoint string + CloudFormationEndpoint string + ECSEndpoint string +} + +var Regions = map[string]Region{ + APNortheast.Name: APNortheast, + APSoutheast.Name: APSoutheast, + APSoutheast2.Name: APSoutheast2, + EUCentral.Name: EUCentral, + EUWest.Name: EUWest, + USEast.Name: USEast, + USWest.Name: USWest, + USWest2.Name: USWest2, + USGovWest.Name: USGovWest, + SAEast.Name: SAEast, + CNNorth.Name: CNNorth, +} + +// Designates a signer interface suitable for signing AWS requests, params +// should be appropriately encoded for the request before signing. +// +// A signer should be initialized with Auth and the appropriate endpoint. 
+type Signer interface { + Sign(method, path string, params map[string]string) +} + +// An AWS Service interface with the API to query the AWS service +// +// Supplied as an easy way to mock out service calls during testing. +type AWSService interface { + // Queries the AWS service at a given method/path with the params and + // returns an http.Response and error + Query(method, path string, params map[string]string) (*http.Response, error) + // Builds an error given an XML payload in the http.Response, can be used + // to process an error if the status code is not 200 for example. + BuildError(r *http.Response) error +} + +// Implements a Server Query/Post API to easily query AWS services and build +// errors when desired +type Service struct { + service ServiceInfo + signer Signer +} + +// Create a base set of params for an action +func MakeParams(action string) map[string]string { + params := make(map[string]string) + params["Action"] = action + return params +} + +// Create a new AWS server to handle making requests +func NewService(auth Auth, service ServiceInfo) (s *Service, err error) { + var signer Signer + switch service.Signer { + case V2Signature: + signer, err = NewV2Signer(auth, service) + // case V4Signature: + // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"]) + default: + err = fmt.Errorf("Unsupported signer for service") + } + if err != nil { + return + } + s = &Service{service: service, signer: signer} + return +} + +func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) { + params["Timestamp"] = time.Now().UTC().Format(time.RFC3339) + u, err := url.Parse(s.service.Endpoint) + if err != nil { + return nil, err + } + u.Path = path + + s.signer.Sign(method, path, params) + if method == "GET" { + u.RawQuery = multimap(params).Encode() + resp, err = http.Get(u.String()) + } else if method == "POST" { + resp, err = http.PostForm(u.String(), multimap(params)) + } + + return +} + +func (s 
*Service) BuildError(r *http.Response) error { + errors := ErrorResponse{} + xml.NewDecoder(r.Body).Decode(&errors) + var err Error + err = errors.Errors + err.RequestId = errors.RequestId + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + return &err +} + +type ErrorResponse struct { + Errors Error `xml:"Error"` + RequestId string // A unique ID for tracking the request +} + +type Error struct { + StatusCode int + Type string + Code string + Message string + RequestId string +} + +func (err *Error) Error() string { + return fmt.Sprintf("Type: %s, Code: %s, Message: %s", + err.Type, err.Code, err.Message, + ) +} + +type Auth struct { + AccessKey, SecretKey string + token string + expiration time.Time +} + +func (a *Auth) Token() string { + if a.token == "" { + return "" + } + if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock + *a, _ = GetAuth("", "", "", time.Time{}) + } + return a.token +} + +func (a *Auth) Expiration() time.Time { + return a.expiration +} + +// To be used with other APIs that return auth credentials such as STS +func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth { + return &Auth{ + AccessKey: accessKey, + SecretKey: secretKey, + token: token, + expiration: expiration, + } +} + +// ResponseMetadata +type ResponseMetadata struct { + RequestId string // A unique ID for tracking the request +} + +type BaseResponse struct { + ResponseMetadata ResponseMetadata +} + +var unreserved = make([]bool, 128) +var hex = "0123456789ABCDEF" + +func init() { + // RFC3986 + u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" + for _, c := range u { + unreserved[c] = true + } +} + +func multimap(p map[string]string) url.Values { + q := make(url.Values, len(p)) + for k, v := range p { + q[k] = []string{v} + } + return q +} + +type credentials struct { + Code string + LastUpdated string + Type string 
+ AccessKeyId string + SecretAccessKey string + Token string + Expiration string +} + +// GetMetaData retrieves instance metadata about the current machine. +// +// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. +func GetMetaData(path string) (contents []byte, err error) { + url := "http://169.254.169.254/latest/meta-data/" + path + + resp, err := RetryingClient.Get(url) + if err != nil { + return + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) + return + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + return []byte(body), err +} + +func getInstanceCredentials() (cred credentials, err error) { + credentialPath := "iam/security-credentials/" + + // Get the instance role + role, err := GetMetaData(credentialPath) + if err != nil { + return + } + + // Get the instance role credentials + credentialJSON, err := GetMetaData(credentialPath + string(role)) + if err != nil { + return + } + + err = json.Unmarshal([]byte(credentialJSON), &cred) + return +} + +// GetAuth creates an Auth based on either passed in credentials, +// environment information or instance based role credentials. 
+func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) { + // First try passed in credentials + if accessKey != "" && secretKey != "" { + return Auth{accessKey, secretKey, token, expiration}, nil + } + + // Next try to get auth from the shared credentials file + auth, err = SharedAuth() + if err == nil { + // Found auth, return + return + } + + // Next try to get auth from the environment + auth, err = EnvAuth() + if err == nil { + // Found auth, return + return + } + + // Next try getting auth from the instance role + cred, err := getInstanceCredentials() + if err == nil { + // Found auth, return + auth.AccessKey = cred.AccessKeyId + auth.SecretKey = cred.SecretAccessKey + auth.token = cred.Token + exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration) + if err != nil { + err = fmt.Errorf("Error Parseing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err) + } + auth.expiration = exptdate + return auth, err + } + err = errors.New("No valid AWS authentication found") + return auth, err +} + +// EnvAuth creates an Auth based on environment information. +// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment +// variables are used. +// AWS_SESSION_TOKEN is used if present. +func EnvAuth() (auth Auth, err error) { + auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") + if auth.AccessKey == "" { + auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") + } + + auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") + if auth.SecretKey == "" { + auth.SecretKey = os.Getenv("AWS_SECRET_KEY") + } + if auth.AccessKey == "" { + err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") + } + if auth.SecretKey == "" { + err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") + } + + auth.token = os.Getenv("AWS_SESSION_TOKEN") + return +} + +// SharedAuth creates an Auth based on shared credentials stored in +// $HOME/.aws/credentials. 
The AWS_PROFILE environment variables is used to +// select the profile. +func SharedAuth() (auth Auth, err error) { + var profileName = os.Getenv("AWS_PROFILE") + + if profileName == "" { + profileName = "default" + } + + var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE") + if credentialsFile == "" { + var homeDir = os.Getenv("HOME") + if homeDir == "" { + err = errors.New("Could not get HOME") + return + } + credentialsFile = homeDir + "/.aws/credentials" + } + + file, err := ini.LoadFile(credentialsFile) + if err != nil { + err = errors.New("Couldn't parse AWS credentials file") + return + } + + var profile = file[profileName] + if profile == nil { + err = errors.New("Couldn't find profile in AWS credentials file") + return + } + + auth.AccessKey = profile["aws_access_key_id"] + auth.SecretKey = profile["aws_secret_access_key"] + + if auth.AccessKey == "" { + err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file") + } + if auth.SecretKey == "" { + err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file") + } + return +} + +// Encode takes a string and URI-encodes it in a way suitable +// to be used in AWS signatures. 
+func Encode(s string) string { + encode := false + for i := 0; i != len(s); i++ { + c := s[i] + if c > 127 || !unreserved[c] { + encode = true + break + } + } + if !encode { + return s + } + e := make([]byte, len(s)*3) + ei := 0 + for i := 0; i != len(s); i++ { + c := s[i] + if c > 127 || !unreserved[c] { + e[ei] = '%' + e[ei+1] = hex[c>>4] + e[ei+2] = hex[c&0xF] + ei += 3 + } else { + e[ei] = c + ei += 1 + } + } + return string(e[:ei]) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go new file mode 100644 index 000000000..0c74a7905 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/aws_test.go @@ -0,0 +1,211 @@ +package aws_test + +import ( + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/goamz/goamz/aws" + . "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&S{}) + +type S struct { + environ []string +} + +func (s *S) SetUpSuite(c *C) { + s.environ = os.Environ() +} + +func (s *S) TearDownTest(c *C) { + os.Clearenv() + for _, kv := range s.environ { + l := strings.SplitN(kv, "=", 2) + os.Setenv(l[0], l[1]) + } +} + +func (s *S) TestSharedAuthNoHome(c *C) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "foo") + _, err := aws.SharedAuth() + c.Assert(err, ErrorMatches, "Could not get HOME") +} + +func (s *S) TestSharedAuthNoCredentialsFile(c *C) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "foo") + os.Setenv("HOME", "/tmp") + _, err := aws.SharedAuth() + c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file") +} + +func (s *S) TestSharedAuthNoProfileInFile(c *C) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "foo") + + d, err := ioutil.TempDir("", "") + if err != nil { + panic(err) + } + defer os.RemoveAll(d) + + err = os.Mkdir(d+"/.aws", 0755) + if err != nil { + panic(err) + } + + ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644) + os.Setenv("HOME", d) + + _, err = 
aws.SharedAuth() + c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file") +} + +func (s *S) TestSharedAuthNoKeysInProfile(c *C) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "bar") + + d, err := ioutil.TempDir("", "") + if err != nil { + panic(err) + } + defer os.RemoveAll(d) + + err = os.Mkdir(d+"/.aws", 0755) + if err != nil { + panic(err) + } + + ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644) + os.Setenv("HOME", d) + + _, err = aws.SharedAuth() + c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file") +} + +func (s *S) TestSharedAuthDefaultCredentials(c *C) { + os.Clearenv() + + d, err := ioutil.TempDir("", "") + if err != nil { + panic(err) + } + defer os.RemoveAll(d) + + err = os.Mkdir(d+"/.aws", 0755) + if err != nil { + panic(err) + } + + ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644) + os.Setenv("HOME", d) + + auth, err := aws.SharedAuth() + c.Assert(err, IsNil) + c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) +} + +func (s *S) TestSharedAuth(c *C) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "bar") + + d, err := ioutil.TempDir("", "") + if err != nil { + panic(err) + } + defer os.RemoveAll(d) + + err = os.Mkdir(d+"/.aws", 0755) + if err != nil { + panic(err) + } + + ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644) + os.Setenv("HOME", d) + + auth, err := aws.SharedAuth() + c.Assert(err, IsNil) + c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) +} + +func (s *S) TestEnvAuthNoSecret(c *C) { + os.Clearenv() + _, err := aws.EnvAuth() + c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") +} + +func (s *S) TestEnvAuthNoAccess(c *C) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "foo") + _, err := aws.EnvAuth() + 
c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") +} + +func (s *S) TestEnvAuth(c *C) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_ACCESS_KEY_ID", "access") + auth, err := aws.EnvAuth() + c.Assert(err, IsNil) + c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) +} + +func (s *S) TestEnvAuthAlt(c *C) { + os.Clearenv() + os.Setenv("AWS_SECRET_KEY", "secret") + os.Setenv("AWS_ACCESS_KEY", "access") + auth, err := aws.EnvAuth() + c.Assert(err, IsNil) + c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) +} + +func (s *S) TestEnvAuthToken(c *C) { + os.Clearenv() + os.Setenv("AWS_SECRET_KEY", "secret") + os.Setenv("AWS_ACCESS_KEY", "access") + os.Setenv("AWS_SESSION_TOKEN", "token") + auth, err := aws.EnvAuth() + c.Assert(err, IsNil) + c.Assert(auth.SecretKey, Equals, "secret") + c.Assert(auth.AccessKey, Equals, "access") + c.Assert(auth.Token(), Equals, "token") +} + +func (s *S) TestGetAuthStatic(c *C) { + exptdate := time.Now().Add(time.Hour) + auth, err := aws.GetAuth("access", "secret", "token", exptdate) + c.Assert(err, IsNil) + c.Assert(auth.AccessKey, Equals, "access") + c.Assert(auth.SecretKey, Equals, "secret") + c.Assert(auth.Token(), Equals, "token") + c.Assert(auth.Expiration(), Equals, exptdate) +} + +func (s *S) TestGetAuthEnv(c *C) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_ACCESS_KEY_ID", "access") + auth, err := aws.GetAuth("", "", "", time.Time{}) + c.Assert(err, IsNil) + c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) +} + +func (s *S) TestEncode(c *C) { + c.Assert(aws.Encode("foo"), Equals, "foo") + c.Assert(aws.Encode("/"), Equals, "%2F") +} + +func (s *S) TestRegionsAreNamed(c *C) { + for n, r := range aws.Regions { + c.Assert(n, Equals, r.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go 
b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go new file mode 100644 index 000000000..86d2ccec8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client.go @@ -0,0 +1,124 @@ +package aws + +import ( + "math" + "net" + "net/http" + "time" +) + +type RetryableFunc func(*http.Request, *http.Response, error) bool +type WaitFunc func(try int) +type DeadlineFunc func() time.Time + +type ResilientTransport struct { + // Timeout is the maximum amount of time a dial will wait for + // a connect to complete. + // + // The default is no timeout. + // + // With or without a timeout, the operating system may impose + // its own earlier timeout. For instance, TCP timeouts are + // often around 3 minutes. + DialTimeout time.Duration + + // MaxTries, if non-zero, specifies the number of times we will retry on + // failure. Retries are only attempted for temporary network errors or known + // safe failures. + MaxTries int + Deadline DeadlineFunc + ShouldRetry RetryableFunc + Wait WaitFunc + transport *http.Transport +} + +// Convenience method for creating an http client +func NewClient(rt *ResilientTransport) *http.Client { + rt.transport = &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + c, err := net.DialTimeout(netw, addr, rt.DialTimeout) + if err != nil { + return nil, err + } + c.SetDeadline(rt.Deadline()) + return c, nil + }, + Proxy: http.ProxyFromEnvironment, + } + // TODO: Would be nice is ResilientTransport allowed clients to initialize + // with http.Transport attributes. 
+ return &http.Client{ + Transport: rt, + } +} + +var retryingTransport = &ResilientTransport{ + Deadline: func() time.Time { + return time.Now().Add(5 * time.Second) + }, + DialTimeout: 10 * time.Second, + MaxTries: 3, + ShouldRetry: awsRetry, + Wait: ExpBackoff, +} + +// Exported default client +var RetryingClient = NewClient(retryingTransport) + +func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.tries(req) +} + +// Retry a request a maximum of t.MaxTries times. +// We'll only retry if the proper criteria are met. +// If a wait function is specified, wait that amount of time +// In between requests. +func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { + for try := 0; try < t.MaxTries; try += 1 { + res, err = t.transport.RoundTrip(req) + + if !t.ShouldRetry(req, res, err) { + break + } + if res != nil { + res.Body.Close() + } + if t.Wait != nil { + t.Wait(try) + } + } + + return +} + +func ExpBackoff(try int) { + time.Sleep(100 * time.Millisecond * + time.Duration(math.Exp2(float64(try)))) +} + +func LinearBackoff(try int) { + time.Sleep(time.Duration(try*100) * time.Millisecond) +} + +// Decide if we should retry a request. +// In general, the criteria for retrying a request is described here +// http://docs.aws.amazon.com/general/latest/gr/api-retries.html +func awsRetry(req *http.Request, res *http.Response, err error) bool { + retry := false + + // Retry if there's a temporary network error. + if neterr, ok := err.(net.Error); ok { + if neterr.Temporary() { + retry = true + } + } + + // Retry if we get a 5xx series error. 
+ if res != nil { + if res.StatusCode >= 500 && res.StatusCode < 600 { + retry = true + } + } + + return retry +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go new file mode 100644 index 000000000..c66a86333 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/client_test.go @@ -0,0 +1,121 @@ +package aws_test + +import ( + "fmt" + "github.com/goamz/goamz/aws" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +// Retrieve the response from handler using aws.RetryingClient +func serveAndGet(handler http.HandlerFunc) (body string, err error) { + ts := httptest.NewServer(handler) + defer ts.Close() + resp, err := aws.RetryingClient.Get(ts.URL) + if err != nil { + return + } + if resp.StatusCode != 200 { + return "", fmt.Errorf("Bad status code: %d", resp.StatusCode) + } + greeting, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return + } + return strings.TrimSpace(string(greeting)), nil +} + +func TestClient_expected(t *testing.T) { + body := "foo bar" + + resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, body) + }) + if err != nil { + t.Fatal(err) + } + if resp != body { + t.Fatal("Body not as expected.") + } +} + +func TestClient_delay(t *testing.T) { + body := "baz" + wait := 4 + resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { + if wait < 0 { + // If we dipped to zero delay and still failed. + t.Fatal("Never succeeded.") + } + wait -= 1 + time.Sleep(time.Second * time.Duration(wait)) + fmt.Fprintln(w, body) + }) + if err != nil { + t.Fatal(err) + } + if resp != body { + t.Fatal("Body not as expected.", resp) + } +} + +func TestClient_no4xxRetry(t *testing.T) { + tries := 0 + + // Fail once before succeeding. 
+ _, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { + tries += 1 + http.Error(w, "error", 404) + }) + + if err == nil { + t.Fatal("should have error") + } + + if tries != 1 { + t.Fatalf("should only try once: %d", tries) + } +} + +func TestClient_retries(t *testing.T) { + body := "biz" + failed := false + // Fail once before succeeding. + resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { + if !failed { + http.Error(w, "error", 500) + failed = true + } else { + fmt.Fprintln(w, body) + } + }) + if failed != true { + t.Error("We didn't retry!") + } + if err != nil { + t.Fatal(err) + } + if resp != body { + t.Fatal("Body not as expected.") + } +} + +func TestClient_fails(t *testing.T) { + tries := 0 + // Fail 3 times and return the last error. + _, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { + tries += 1 + http.Error(w, "error", 500) + }) + if err == nil { + t.Fatal(err) + } + if tries != 3 { + t.Fatal("Didn't retry enough") + } +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go new file mode 100644 index 000000000..c4aca1d72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/export_test.go @@ -0,0 +1,29 @@ +package aws + +import ( + "net/http" + "time" +) + +// V4Signer: +// Exporting methods for testing + +func (s *V4Signer) RequestTime(req *http.Request) time.Time { + return s.requestTime(req) +} + +func (s *V4Signer) CanonicalRequest(req *http.Request) string { + return s.canonicalRequest(req) +} + +func (s *V4Signer) StringToSign(t time.Time, creq string) string { + return s.stringToSign(t, creq) +} + +func (s *V4Signer) Signature(t time.Time, sts string) string { + return s.signature(t, sts) +} + +func (s *V4Signer) Authorization(header http.Header, t time.Time, signature string) string { + return s.authorization(header, t, signature) +} diff --git 
a/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go new file mode 100644 index 000000000..508231e7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/regions.go @@ -0,0 +1,243 @@ +package aws + +var USGovWest = Region{ + "us-gov-west-1", + "https://ec2.us-gov-west-1.amazonaws.com", + "https://s3-fips-us-gov-west-1.amazonaws.com", + "", + true, + true, + "", + "", + "https://sns.us-gov-west-1.amazonaws.com", + "https://sqs.us-gov-west-1.amazonaws.com", + "https://iam.us-gov.amazonaws.com", + "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", + "https://dynamodb.us-gov-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-gov-west-1.amazonaws.com", + ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-gov-west-1.amazonaws.com", + "https://ecs.us-gov-west-1.amazonaws.com", +} + +var USEast = Region{ + "us-east-1", + "https://ec2.us-east-1.amazonaws.com", + "https://s3.amazonaws.com", + "", + false, + false, + "https://sdb.amazonaws.com", + "https://email.us-east-1.amazonaws.com", + "https://sns.us-east-1.amazonaws.com", + "https://sqs.us-east-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-east-1.amazonaws.com", + "https://dynamodb.us-east-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-east-1.amazonaws.com", + ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-east-1.amazonaws.com", + "https://ecs.us-east-1.amazonaws.com", +} + +var USWest = Region{ + "us-west-1", + "https://ec2.us-west-1.amazonaws.com", + "https://s3-us-west-1.amazonaws.com", + "", + true, + true, + "https://sdb.us-west-1.amazonaws.com", + "", + 
"https://sns.us-west-1.amazonaws.com", + "https://sqs.us-west-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-west-1.amazonaws.com", + "https://dynamodb.us-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-west-1.amazonaws.com", + ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-west-1.amazonaws.com", + "https://ecs.us-west-1.amazonaws.com", +} + +var USWest2 = Region{ + "us-west-2", + "https://ec2.us-west-2.amazonaws.com", + "https://s3-us-west-2.amazonaws.com", + "", + true, + true, + "https://sdb.us-west-2.amazonaws.com", + "https://email.us-west-2.amazonaws.com", + "https://sns.us-west-2.amazonaws.com", + "https://sqs.us-west-2.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-west-2.amazonaws.com", + "https://dynamodb.us-west-2.amazonaws.com", + ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature}, + "https://autoscaling.us-west-2.amazonaws.com", + ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-west-2.amazonaws.com", + "https://ecs.us-west-2.amazonaws.com", +} + +var EUWest = Region{ + "eu-west-1", + "https://ec2.eu-west-1.amazonaws.com", + "https://s3-eu-west-1.amazonaws.com", + "", + true, + true, + "https://sdb.eu-west-1.amazonaws.com", + "https://email.eu-west-1.amazonaws.com", + "https://sns.eu-west-1.amazonaws.com", + "https://sqs.eu-west-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.eu-west-1.amazonaws.com", + "https://dynamodb.eu-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.eu-west-1.amazonaws.com", + ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + 
"https://cloudformation.eu-west-1.amazonaws.com", + "https://ecs.eu-west-1.amazonaws.com", +} + +var EUCentral = Region{ + "eu-central-1", + "https://ec2.eu-central-1.amazonaws.com", + "https://s3-eu-central-1.amazonaws.com", + "", + true, + true, + "https://sdb.eu-central-1.amazonaws.com", + "https://email.eu-central-1.amazonaws.com", + "https://sns.eu-central-1.amazonaws.com", + "https://sqs.eu-central-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.eu-central-1.amazonaws.com", + "https://dynamodb.eu-central-1.amazonaws.com", + ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature}, + "https://autoscaling.eu-central-1.amazonaws.com", + ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.eu-central-1.amazonaws.com", + "https://ecs.eu-central-1.amazonaws.com", +} + +var APSoutheast = Region{ + "ap-southeast-1", + "https://ec2.ap-southeast-1.amazonaws.com", + "https://s3-ap-southeast-1.amazonaws.com", + "", + true, + true, + "https://sdb.ap-southeast-1.amazonaws.com", + "", + "https://sns.ap-southeast-1.amazonaws.com", + "https://sqs.ap-southeast-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", + "https://dynamodb.ap-southeast-1.amazonaws.com", + ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature}, + "https://autoscaling.ap-southeast-1.amazonaws.com", + ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.ap-southeast-1.amazonaws.com", + "https://ecs.ap-southeast-1.amazonaws.com", +} + +var APSoutheast2 = Region{ + "ap-southeast-2", + "https://ec2.ap-southeast-2.amazonaws.com", + "https://s3-ap-southeast-2.amazonaws.com", + "", + true, + true, + "https://sdb.ap-southeast-2.amazonaws.com", + "", + "https://sns.ap-southeast-2.amazonaws.com", + "https://sqs.ap-southeast-2.amazonaws.com", 
+ "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", + "https://dynamodb.ap-southeast-2.amazonaws.com", + ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature}, + "https://autoscaling.ap-southeast-2.amazonaws.com", + ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.ap-southeast-2.amazonaws.com", + "https://ecs.ap-southeast-2.amazonaws.com", +} + +var APNortheast = Region{ + "ap-northeast-1", + "https://ec2.ap-northeast-1.amazonaws.com", + "https://s3-ap-northeast-1.amazonaws.com", + "", + true, + true, + "https://sdb.ap-northeast-1.amazonaws.com", + "", + "https://sns.ap-northeast-1.amazonaws.com", + "https://sqs.ap-northeast-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", + "https://dynamodb.ap-northeast-1.amazonaws.com", + ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature}, + "https://autoscaling.ap-northeast-1.amazonaws.com", + ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.ap-northeast-1.amazonaws.com", + "https://ecs.ap-northeast-1.amazonaws.com", +} + +var SAEast = Region{ + "sa-east-1", + "https://ec2.sa-east-1.amazonaws.com", + "https://s3-sa-east-1.amazonaws.com", + "", + true, + true, + "https://sdb.sa-east-1.amazonaws.com", + "", + "https://sns.sa-east-1.amazonaws.com", + "https://sqs.sa-east-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.sa-east-1.amazonaws.com", + "https://dynamodb.sa-east-1.amazonaws.com", + ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature}, + "https://autoscaling.sa-east-1.amazonaws.com", + ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.sa-east-1.amazonaws.com", + "https://ecs.sa-east-1.amazonaws.com", 
+} + +var CNNorth = Region{ + "cn-north-1", + "https://ec2.cn-north-1.amazonaws.com.cn", + "https://s3.cn-north-1.amazonaws.com.cn", + "", + true, + true, + "https://sdb.cn-north-1.amazonaws.com.cn", + "", + "https://sns.cn-north-1.amazonaws.com.cn", + "https://sqs.cn-north-1.amazonaws.com.cn", + "https://iam.cn-north-1.amazonaws.com.cn", + "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", + "https://dynamodb.cn-north-1.amazonaws.com.cn", + ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature}, + "https://autoscaling.cn-north-1.amazonaws.com.cn", + ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature}, + "https://sts.cn-north-1.amazonaws.com.cn", + "https://cloudformation.cn-north-1.amazonaws.com.cn", + "https://ecs.cn-north-1.amazonaws.com.cn", +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go new file mode 100644 index 000000000..5cf990525 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign.go @@ -0,0 +1,357 @@ +package aws + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "sort" + "strings" + "time" +) + +type V2Signer struct { + auth Auth + service ServiceInfo + host string +} + +var b64 = base64.StdEncoding + +func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) { + u, err := url.Parse(service.Endpoint) + if err != nil { + return nil, err + } + return &V2Signer{auth: auth, service: service, host: u.Host}, nil +} + +func (s *V2Signer) Sign(method, path string, params map[string]string) { + params["AWSAccessKeyId"] = s.auth.AccessKey + params["SignatureVersion"] = "2" + params["SignatureMethod"] = "HmacSHA256" + if s.auth.Token() != "" { + params["SecurityToken"] = s.auth.Token() + } + + // AWS specifies that the parameters in a signed request must + // be provided in the natural order of the keys. 
This is distinct + // from the natural order of the encoded value of key=value. + // Percent and Equals affect the sorting order. + var keys, sarray []string + for k, _ := range params { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + sarray = append(sarray, Encode(k)+"="+Encode(params[k])) + } + joined := strings.Join(sarray, "&") + payload := method + "\n" + s.host + "\n" + path + "\n" + joined + hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) + hash.Write([]byte(payload)) + signature := make([]byte, b64.EncodedLen(hash.Size())) + b64.Encode(signature, hash.Sum(nil)) + + params["Signature"] = string(signature) +} + +// Common date formats for signing requests +const ( + ISO8601BasicFormat = "20060102T150405Z" + ISO8601BasicFormatShort = "20060102" +) + +type Route53Signer struct { + auth Auth +} + +func NewRoute53Signer(auth Auth) *Route53Signer { + return &Route53Signer{auth: auth} +} + +// getCurrentDate fetches the date stamp from the aws servers to +// ensure the auth headers are within 5 minutes of the server time +func (s *Route53Signer) getCurrentDate() string { + response, err := http.Get("https://route53.amazonaws.com/date") + if err != nil { + fmt.Print("Unable to get date from amazon: ", err) + return "" + } + + response.Body.Close() + return response.Header.Get("Date") +} + +// Creates the authorize signature based on the date stamp and secret key +func (s *Route53Signer) getHeaderAuthorize(message string) string { + hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey)) + hmacSha256.Write([]byte(message)) + cryptedString := hmacSha256.Sum(nil) + + return base64.StdEncoding.EncodeToString(cryptedString) +} + +// Adds all the required headers for AWS Route53 API to the request +// including the authorization +func (s *Route53Signer) Sign(req *http.Request) { + date := s.getCurrentDate() + authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s", + s.auth.AccessKey, "HmacSHA256", 
s.getHeaderAuthorize(date)) + + req.Header.Set("Host", req.Host) + req.Header.Set("X-Amzn-Authorization", authHeader) + req.Header.Set("X-Amz-Date", date) + req.Header.Set("Content-Type", "application/xml") +} + +/* +The V4Signer encapsulates all of the functionality to sign a request with the AWS +Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) +*/ +type V4Signer struct { + auth Auth + serviceName string + region Region +} + +/* +Return a new instance of a V4Signer capable of signing AWS requests. +*/ +func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer { + return &V4Signer{auth: auth, serviceName: serviceName, region: region} +} + +/* +Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) + +The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date" +or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires +the "host" header to be a signed header, therefor the Sign method will manually set a "host" header from +the request.Host. + +The signed request will include a new "Authorization" header indicating that the request has been signed. + +Any changes to the request after signing the request will invalidate the signature. +*/ +func (s *V4Signer) Sign(req *http.Request) { + req.Header.Set("host", req.Host) // host header must be included as a signed header + t := s.requestTime(req) // Get requst time + creq := s.canonicalRequest(req) // Build canonical request + sts := s.stringToSign(t, creq) // Build string to sign + signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 + auth := s.authorization(req.Header, t, signature) // Create Authorization header value + req.Header.Set("Authorization", auth) // Add Authorization header to request + return +} + +/* +requestTime method will parse the time from the request "x-amz-date" or "date" headers. 
+If the "x-amz-date" header is present, that will take priority over the "date" header. +If neither header is defined or we are unable to parse either header as a valid date +then we will create a new "x-amz-date" header with the current time. +*/ +func (s *V4Signer) requestTime(req *http.Request) time.Time { + + // Get "x-amz-date" header + date := req.Header.Get("x-amz-date") + + // Attempt to parse as ISO8601BasicFormat + t, err := time.Parse(ISO8601BasicFormat, date) + if err == nil { + return t + } + + // Attempt to parse as http.TimeFormat + t, err = time.Parse(http.TimeFormat, date) + if err == nil { + req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) + return t + } + + // Get "date" header + date = req.Header.Get("date") + + // Attempt to parse as http.TimeFormat + t, err = time.Parse(http.TimeFormat, date) + if err == nil { + return t + } + + // Create a current time header to be used + t = time.Now().UTC() + req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) + return t +} + +/* +canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. 
(http://goo.gl/eUUZ3S) + + CanonicalRequest = + HTTPRequestMethod + '\n' + + CanonicalURI + '\n' + + CanonicalQueryString + '\n' + + CanonicalHeaders + '\n' + + SignedHeaders + '\n' + + HexEncode(Hash(Payload)) +*/ +func (s *V4Signer) canonicalRequest(req *http.Request) string { + c := new(bytes.Buffer) + fmt.Fprintf(c, "%s\n", req.Method) + fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL)) + fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL)) + fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header)) + fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header)) + fmt.Fprintf(c, "%s", s.payloadHash(req)) + return c.String() +} + +func (s *V4Signer) canonicalURI(u *url.URL) string { + canonicalPath := u.RequestURI() + if u.RawQuery != "" { + canonicalPath = canonicalPath[:len(canonicalPath)-len(u.RawQuery)-1] + } + slash := strings.HasSuffix(canonicalPath, "/") + canonicalPath = path.Clean(canonicalPath) + if canonicalPath != "/" && slash { + canonicalPath += "/" + } + return canonicalPath +} + +func (s *V4Signer) canonicalQueryString(u *url.URL) string { + var a []string + for k, vs := range u.Query() { + k = url.QueryEscape(k) + for _, v := range vs { + if v == "" { + a = append(a, k+"=") + } else { + v = url.QueryEscape(v) + a = append(a, k+"="+v) + } + } + } + sort.Strings(a) + return strings.Join(a, "&") +} + +func (s *V4Signer) canonicalHeaders(h http.Header) string { + i, a := 0, make([]string, len(h)) + for k, v := range h { + for j, w := range v { + v[j] = strings.Trim(w, " ") + } + sort.Strings(v) + a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",") + i++ + } + sort.Strings(a) + return strings.Join(a, "\n") +} + +func (s *V4Signer) signedHeaders(h http.Header) string { + i, a := 0, make([]string, len(h)) + for k, _ := range h { + a[i] = strings.ToLower(k) + i++ + } + sort.Strings(a) + return strings.Join(a, ";") +} + +func (s *V4Signer) payloadHash(req *http.Request) string { + var b []byte + if req.Body == nil { + b = []byte("") + } else { + var err error + 
b, err = ioutil.ReadAll(req.Body) + if err != nil { + // TODO: I REALLY DON'T LIKE THIS PANIC!!!! + panic(err) + } + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + return s.hash(string(b)) +} + +/* +stringToSign method creates the string to sign accorting to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu) + + StringToSign = + Algorithm + '\n' + + RequestDate + '\n' + + CredentialScope + '\n' + + HexEncode(Hash(CanonicalRequest)) +*/ +func (s *V4Signer) stringToSign(t time.Time, creq string) string { + w := new(bytes.Buffer) + fmt.Fprint(w, "AWS4-HMAC-SHA256\n") + fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat)) + fmt.Fprintf(w, "%s\n", s.credentialScope(t)) + fmt.Fprintf(w, "%s", s.hash(creq)) + return w.String() +} + +func (s *V4Signer) credentialScope(t time.Time) string { + return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName) +} + +/* +signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1) + + signature = HexEncode(HMAC(derived-signing-key, string-to-sign)) +*/ +func (s *V4Signer) signature(t time.Time, sts string) string { + h := s.hmac(s.derivedKey(t), []byte(sts)) + return fmt.Sprintf("%x", h) +} + +/* +derivedKey method derives a signing key to be used for signing a request. + + kSecret = Your AWS Secret Access Key + kDate = HMAC("AWS4" + kSecret, Date) + kRegion = HMAC(kDate, Region) + kService = HMAC(kRegion, Service) + kSigning = HMAC(kService, "aws4_request") +*/ +func (s *V4Signer) derivedKey(t time.Time) []byte { + h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) + h = s.hmac(h, []byte(s.region.Name)) + h = s.hmac(h, []byte(s.serviceName)) + h = s.hmac(h, []byte("aws4_request")) + return h +} + +/* +authorization method generates the authorization header value. 
+*/ +func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { + w := new(bytes.Buffer) + fmt.Fprint(w, "AWS4-HMAC-SHA256 ") + fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) + fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) + fmt.Fprintf(w, "Signature=%s", signature) + return w.String() +} + +// hash method calculates the sha256 hash for a given string +func (s *V4Signer) hash(in string) string { + h := sha256.New() + fmt.Fprintf(h, "%s", in) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// hmac method calculates the sha256 hmac for a given slice of bytes +func (s *V4Signer) hmac(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + h.Write(data) + return h.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go new file mode 100644 index 000000000..c6b685e20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/aws/sign_test.go @@ -0,0 +1,525 @@ +package aws_test + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/goamz/goamz/aws" + . 
"gopkg.in/check.v1" +) + +var _ = Suite(&V4SignerSuite{}) + +type V4SignerSuite struct { + auth aws.Auth + region aws.Region + cases []V4SignerSuiteCase +} + +type V4SignerSuiteCase struct { + label string + request V4SignerSuiteCaseRequest + canonicalRequest string + stringToSign string + signature string + authorization string +} + +type V4SignerSuiteCaseRequest struct { + method string + host string + url string + headers []string + body string +} + +func (s *V4SignerSuite) SetUpSuite(c *C) { + s.auth = aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"} + s.region = aws.USEast + + // Test cases from the Signature Version 4 Test Suite (http://goo.gl/nguvs0) + s.cases = append(s.cases, + + // get-header-key-duplicate + V4SignerSuiteCase{ + label: "get-header-key-duplicate", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar", "zoo:foobar", "zoo:zoobar"}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:foobar,zoobar,zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3c52f0eaae2b61329c0a332e3fa15842a37bc5812cf4d80eb64784308850e313", + signature: "54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed", + }, + + // get-header-value-order + V4SignerSuiteCase{ + label: "get-header-value-order", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p:z", "p:a", "p:p", "p:a"}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 
GMT\nhost:host.foo.com\np:a,a,p,z\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n94c0389fefe0988cbbedc8606f0ca0b485b48da010d09fc844b45b697c8924fe", + signature: "d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d", + }, + + // get-header-value-trim + V4SignerSuiteCase{ + label: "get-header-value-trim", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p: phfft "}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:phfft\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndddd1902add08da1ac94782b05f9278c08dc7468db178a84f8950d93b30b1f35", + signature: "debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592", + }, + + // get-relative-relative + V4SignerSuiteCase{ + label: "get-relative-relative", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/foo/bar/../..", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: 
"AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", + signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + }, + + // get-relative + V4SignerSuiteCase{ + label: "get-relative", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/foo/..", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", + signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + }, + + // get-slash-dot-slash + V4SignerSuiteCase{ + label: "get-slash-dot-slash", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/./", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", + signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, 
SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + }, + + // get-slash-pointless-dot + V4SignerSuiteCase{ + label: "get-slash-pointless-dot", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/./foo", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n8021a97572ee460f87ca67f4e8c0db763216d84715f5424a843a5312a3321e2d", + signature: "910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a", + }, + + // get-slash + V4SignerSuiteCase{ + label: "get-slash", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "//", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", + signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + }, + + // get-slashes + V4SignerSuiteCase{ + label: "get-slashes", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "//foo//", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 
GMT"}, + }, + canonicalRequest: "GET\n/foo/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n6bb4476ee8745730c9cb79f33a0c70baa6d8af29c0077fa12e4e8f1dd17e7098", + signature: "b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19", + }, + + // get-space + V4SignerSuiteCase{ + label: "get-space", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/%20/foo", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/%20/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n69c45fb9fe3fd76442b5086e50b2e9fec8298358da957b293ef26e506fdfb54b", + signature: "f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374", + }, + + // get-unreserved + V4SignerSuiteCase{ + label: "get-unreserved", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: 
"AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndf63ee3247c0356c696a3b21f8d8490b01fa9cd5bc6550ef5ef5f4636b7b8901", + signature: "830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e", + }, + + // get-utf8 + V4SignerSuiteCase{ + label: "get-utf8", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/%E1%88%B4", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/%E1%88%B4\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n27ba31df5dbc6e063d8f87d62eb07143f7f271c5330a917840586ac1c85b6f6b", + signature: "8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74", + }, + + // get-vanilla-empty-query-key + V4SignerSuiteCase{ + label: "get-vanilla-empty-query-key", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/?foo=bar", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n0846c2945b0832deb7a463c66af5c4f8bd54ec28c438e67a214445b157c9ddf8", + signature: "56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f", + authorization: "AWS4-HMAC-SHA256 
Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f", + }, + + // get-vanilla-query-order-key-case + V4SignerSuiteCase{ + label: "get-vanilla-query-order-key-case", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/?foo=Zoo&foo=aha", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\nfoo=Zoo&foo=aha\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ne25f777ba161a0f1baf778a87faf057187cf5987f17953320e3ca399feb5f00d", + signature: "be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09", + }, + + // get-vanilla-query-order-key + V4SignerSuiteCase{ + label: "get-vanilla-query-order-key", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/?a=foo&b=foo", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\na=foo&b=foo\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n2f23d14fe13caebf6dfda346285c6d9c14f49eaca8f5ec55c627dd7404f7a727", + signature: "0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b", + }, + + // get-vanilla-query-order-value + 
V4SignerSuiteCase{ + label: "get-vanilla-query-order-value", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/?foo=b&foo=a", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\nfoo=a&foo=b\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n33dffc220e89131f8f6157a35c40903daa658608d9129ff9489e5cf5bbd9b11b", + signature: "feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc", + }, + + // get-vanilla-query-unreserved + V4SignerSuiteCase{ + label: "get-vanilla-query-unreserved", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nd2578f3156d4c9d180713d1ff20601d8a3eed0dd35447d24603d7d67414bd6b5", + signature: "f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb", + }, + + // get-vanilla-query + 
V4SignerSuiteCase{ + label: "get-vanilla-query", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", + signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + }, + + // get-vanilla-ut8-query + V4SignerSuiteCase{ + label: "get-vanilla-ut8-query", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/?ሴ=bar", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n%E1%88%B4=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nde5065ff39c131e6c2e2bd19cd9345a794bf3b561eab20b8d97b2093fc2a979e", + signature: "6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c", + }, + + // get-vanilla + V4SignerSuiteCase{ + label: "get-vanilla", + request: V4SignerSuiteCaseRequest{ + method: "GET", + host: "host.foo.com", + url: "/", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 
GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", + signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", + }, + + // post-header-key-case + V4SignerSuiteCase{ + label: "post-header-key-case", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4", + signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", + }, + + // post-header-key-sort + V4SignerSuiteCase{ + label: "post-header-key-sort", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar"}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n34e1bddeb99e76ee01d63b5e28656111e210529efeec6cdfd46a48e4c734545d", + signature: 
"b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a", + }, + + // post-header-value-case + V4SignerSuiteCase{ + label: "post-header-value-case", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "zoo:ZOOBAR"}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:ZOOBAR\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3aae6d8274b8c03e2cc96fc7d6bda4b9bd7a0a184309344470b2c96953e124aa", + signature: "273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7", + }, + + // post-vanilla-empty-query-value + V4SignerSuiteCase{ + label: "post-vanilla-empty-query-value", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/?foo=bar", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e", + signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, 
Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", + }, + + // post-vanilla-query + V4SignerSuiteCase{ + label: "post-vanilla-query", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/?foo=bar", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e", + signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", + }, + + // post-vanilla + V4SignerSuiteCase{ + label: "post-vanilla", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + }, + canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4", + signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", + }, + + // post-x-www-form-urlencoded-parameters + V4SignerSuiteCase{ + label: "post-x-www-form-urlencoded-parameters", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: 
[]string{"Content-Type:application/x-www-form-urlencoded; charset=utf8", "Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + body: "foo=bar", + }, + canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded; charset=utf8\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nc4115f9e54b5cecf192b1eaa23b8e88ed8dc5391bd4fde7b3fff3d9c9fe0af1f", + signature: "b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71", + }, + + // post-x-www-form-urlencoded + V4SignerSuiteCase{ + label: "post-x-www-form-urlencoded", + request: V4SignerSuiteCaseRequest{ + method: "POST", + host: "host.foo.com", + url: "/", + headers: []string{"Content-Type:application/x-www-form-urlencoded", "Date:Mon, 09 Sep 2011 23:36:00 GMT"}, + body: "foo=bar", + }, + canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a", + stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n4c5c6e4b52fb5fb947a8733982a8a5a61b14f04345cbfe6e739236c76dd48f74", + signature: "5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc", + authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc", + }, + ) +} + +func (s *V4SignerSuite) TestCases(c *C) { + signer := aws.NewV4Signer(s.auth, "host", s.region) + + for _, testCase := range s.cases { + + req, err := 
http.NewRequest(testCase.request.method, "http://"+testCase.request.host+testCase.request.url, strings.NewReader(testCase.request.body)) + c.Assert(err, IsNil, Commentf("Testcase: %s", testCase.label)) + for _, v := range testCase.request.headers { + h := strings.SplitN(v, ":", 2) + req.Header.Add(h[0], h[1]) + } + req.Header.Set("host", req.Host) + + t := signer.RequestTime(req) + + canonicalRequest := signer.CanonicalRequest(req) + c.Check(canonicalRequest, Equals, testCase.canonicalRequest, Commentf("Testcase: %s", testCase.label)) + + stringToSign := signer.StringToSign(t, canonicalRequest) + c.Check(stringToSign, Equals, testCase.stringToSign, Commentf("Testcase: %s", testCase.label)) + + signature := signer.Signature(t, stringToSign) + c.Check(signature, Equals, testCase.signature, Commentf("Testcase: %s", testCase.label)) + + authorization := signer.Authorization(req.Header, t, signature) + c.Check(authorization, Equals, testCase.authorization, Commentf("Testcase: %s", testCase.label)) + + signer.Sign(req) + c.Check(req.Header.Get("Authorization"), Equals, testCase.authorization, Commentf("Testcase: %s", testCase.label)) + } +} + +func ExampleV4Signer() { + // Get auth from env vars + auth, err := aws.EnvAuth() + if err != nil { + fmt.Println(err) + } + + // Create a signer with the auth, name of the service, and aws region + signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast) + + // Create a request + req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request")) + if err != nil { + fmt.Println(err) + } + + // Date or x-amz-date header is required to sign a request + req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat)) + + // Sign the request + signer.Sign(req) + + // Issue signed request + http.DefaultClient.Do(req) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go new file mode 100644 index 
000000000..4ff913cde --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/export_test.go @@ -0,0 +1,17 @@ +package s3 + +import ( + "github.com/goamz/goamz/aws" +) + +func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) { + sign(auth, method, path, params, headers) +} + +func SetListPartsMax(n int) { + listPartsMax = n +} + +func SetListMultiMax(n int) { + listMultiMax = n +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go new file mode 100644 index 000000000..1533bda9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi.go @@ -0,0 +1,409 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "errors" + "io" + "sort" + "strconv" +) + +// Multi represents an unfinished multipart upload. +// +// Multipart uploads allow sending big objects in smaller chunks. +// After all parts have been sent, the upload must be explicitly +// completed by calling Complete with the list of parts. +// +// See http://goo.gl/vJfTG for an overview of multipart uploads. +type Multi struct { + Bucket *Bucket + Key string + UploadId string +} + +// That's the default. Here just for testing. +var listMultiMax = 1000 + +type listMultiResp struct { + NextKeyMarker string + NextUploadIdMarker string + IsTruncated bool + Upload []Multi + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// ListMulti returns the list of unfinished multipart uploads in b. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. You can use prefixes to separate a bucket into different +// groupings of keys (to get the feeling of folders, for example). +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. 
You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// See http://goo.gl/ePioY for details. +func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { + params := map[string][]string{ + "uploads": {""}, + "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, + "prefix": {prefix}, + "delimiter": {delim}, + } + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: b.Name, + params: params, + } + var resp listMultiResp + err := b.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, nil, err + } + for i := range resp.Upload { + multi := &resp.Upload[i] + multi.Bucket = b + multis = append(multis, multi) + } + prefixes = append(prefixes, resp.CommonPrefixes...) + if !resp.IsTruncated { + return multis, prefixes, nil + } + params["key-marker"] = []string{resp.NextKeyMarker} + params["upload-id-marker"] = []string{resp.NextUploadIdMarker} + attempt = b.S3.AttemptStrategy.Start() // Last request worked. + } + panic("unreachable") +} + +// Multi returns a multipart upload handler for the provided key +// inside b. If a multipart upload exists for key, it is returned, +// otherwise a new multipart upload is initiated with contType and perm. +func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) { + multis, _, err := b.ListMulti(key, "") + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + for _, m := range multis { + if m.Key == key { + return m, nil + } + } + return b.InitMulti(key, contType, perm) +} + +// InitMulti initializes a new multipart upload at the provided +// key inside b and returns a value for manipulating it. +// +// See http://goo.gl/XP8kL for details. 
+func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) { + headers := map[string][]string{ + "Content-Type": {contType}, + "Content-Length": {"0"}, + "x-amz-acl": {string(perm)}, + } + params := map[string][]string{ + "uploads": {""}, + } + req := &request{ + method: "POST", + bucket: b.Name, + path: key, + headers: headers, + params: params, + } + var err error + var resp struct { + UploadId string `xml:"UploadId"` + } + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + err = b.S3.query(req, &resp) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil +} + +// PutPart sends part n of the multipart upload, reading all the content from r. +// Each part, except for the last one, must be at least 5MB in size. +// +// See http://goo.gl/pqZer for details. +func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64) +} + +func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(partSize, 10)}, + "Content-MD5": {md5b64}, + } + params := map[string][]string{ + "uploadId": {m.UploadId}, + "partNumber": {strconv.FormatInt(int64(n), 10)}, + } + for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); { + _, err := r.Seek(0, 0) + if err != nil { + return Part{}, err + } + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + payload: r, + } + err = m.Bucket.S3.prepare(req) + if err != nil { + return Part{}, err + } + resp, err := m.Bucket.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return Part{}, err + } + etag := resp.Header.Get("ETag") + if etag == "" { + return Part{}, 
errors.New("part upload succeeded with no ETag") + } + return Part{n, etag, partSize}, nil + } + panic("unreachable") +} + +func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { + _, err = r.Seek(0, 0) + if err != nil { + return 0, "", "", err + } + digest := md5.New() + size, err = io.Copy(digest, r) + if err != nil { + return 0, "", "", err + } + sum := digest.Sum(nil) + md5hex = hex.EncodeToString(sum) + md5b64 = base64.StdEncoding.EncodeToString(sum) + return size, md5hex, md5b64, nil +} + +type Part struct { + N int `xml:"PartNumber"` + ETag string + Size int64 +} + +type partSlice []Part + +func (s partSlice) Len() int { return len(s) } +func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } +func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type listPartsResp struct { + NextPartNumberMarker string + IsTruncated bool + Part []Part +} + +// That's the default. Here just for testing. +var listPartsMax = 1000 + +// ListParts returns the list of previously uploaded parts in m, +// ordered by part number. +// +// See http://goo.gl/ePioY for details. +func (m *Multi) ListParts() ([]Part, error) { + params := map[string][]string{ + "uploadId": {m.UploadId}, + "max-parts": {strconv.FormatInt(int64(listPartsMax), 10)}, + } + var parts partSlice + for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + var resp listPartsResp + err := m.Bucket.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + parts = append(parts, resp.Part...) + if !resp.IsTruncated { + sort.Sort(parts) + return parts, nil + } + params["part-number-marker"] = []string{resp.NextPartNumberMarker} + attempt = m.Bucket.S3.AttemptStrategy.Start() // Last request worked. 
+ } + panic("unreachable") +} + +type ReaderAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} + +// PutAll sends all of r via a multipart upload with parts no larger +// than partSize bytes, which must be set to at least 5MB. +// Parts previously uploaded are either reused if their checksum +// and size match the new part, or otherwise overwritten with the +// new content. +// PutAll returns all the parts of m (reused or not). +func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { + old, err := m.ListParts() + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + reuse := 0 // Index of next old part to consider reusing. + current := 1 // Part number of latest good part handled. + totalSize, err := r.Seek(0, 2) + if err != nil { + return nil, err + } + first := true // Must send at least one empty part if the file is empty. + var result []Part +NextSection: + for offset := int64(0); offset < totalSize || first; offset += partSize { + first = false + if offset+partSize > totalSize { + partSize = totalSize - offset + } + section := io.NewSectionReader(r, offset, partSize) + _, md5hex, md5b64, err := seekerInfo(section) + if err != nil { + return nil, err + } + for reuse < len(old) && old[reuse].N <= current { + // Looks like this part was already sent. + part := &old[reuse] + etag := `"` + md5hex + `"` + if part.N == current && part.Size == partSize && part.ETag == etag { + // Checksum matches. Reuse the old part. + result = append(result, *part) + current++ + continue NextSection + } + reuse++ + } + + // Part wasn't found or doesn't match. Send it. 
		part, err := m.putPart(current, section, partSize, md5b64)
		if err != nil {
			return nil, err
		}
		result = append(result, part)
		current++
	}
	return result, nil
}

// completeUpload is the XML request body for CompleteMultipartUpload.
type completeUpload struct {
	XMLName xml.Name      `xml:"CompleteMultipartUpload"`
	Parts   completeParts `xml:"Part"`
}

// completePart identifies one uploaded part within a completeUpload.
type completePart struct {
	PartNumber int
	ETag       string
}

// completeParts implements sort.Interface, ordering parts by part number.
type completeParts []completePart

func (p completeParts) Len() int           { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
	params := map[string][]string{
		"uploadId": {m.UploadId},
	}
	c := completeUpload{}
	for _, p := range parts {
		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
	}
	// Send parts in ascending part-number order regardless of the
	// order the caller provided them in.
	sort.Sort(c.Parts)
	data, err := xml.Marshal(&c)
	if err != nil {
		return err
	}
	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
		req := &request{
			method:  "POST",
			bucket:  m.Bucket.Name,
			path:    m.Key,
			params:  params,
			payload: bytes.NewReader(data),
		}
		err := m.Bucket.S3.query(req, nil)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		return err
	}
	panic("unreachable")
}

// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
	params := map[string][]string{
		"uploadId": {m.UploadId},
	}
	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
		req := &request{
			method: "DELETE",
			bucket: m.Bucket.Name,
			path:   m.Key,
			params: params,
		}
		err := m.Bucket.S3.query(req, nil)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		return err
	}
	panic("unreachable")
}
diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go
new file mode 100644
index 000000000..efab302d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/multi_test.go
@@ -0,0 +1,371 @@
package s3_test

import (
	"encoding/xml"
	"io"
	"io/ioutil"
	"strings"

	"github.com/goamz/goamz/s3"
	. "gopkg.in/check.v1"
)

func (s *S) TestInitMulti(c *C) {
	testServer.Response(200, nil, InitMultiResultDump)

	b := s.s3.Bucket("sample")

	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "POST")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
	c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})

	c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}

func (s *S) TestMultiNoPreviousUpload(c *C) {
	// Don't retry the NoSuchUpload error.
+ s.DisableRetries() + + testServer.Response(404, nil, NoSuchUploadErrorDump) + testServer.Response(200, nil, InitMultiResultDump) + + b := s.s3.Bucket("sample") + + multi, err := b.Multi("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/") + c.Assert(req.Form["uploads"], DeepEquals, []string{""}) + c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"}) + + req = testServer.WaitRequest() + c.Assert(req.Method, Equals, "POST") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form["uploads"], DeepEquals, []string{""}) + + c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--") +} + +func (s *S) TestMultiReturnOld(c *C) { + testServer.Response(200, nil, ListMultiResultDump) + + b := s.s3.Bucket("sample") + + multi, err := b.Multi("multi1", "text/plain", s3.Private) + c.Assert(err, IsNil) + c.Assert(multi.Key, Equals, "multi1") + c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD") + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/") + c.Assert(req.Form["uploads"], DeepEquals, []string{""}) + c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"}) +} + +func (s *S) TestListParts(c *C) { + testServer.Response(200, nil, InitMultiResultDump) + testServer.Response(200, nil, ListPartsResultDump1) + testServer.Response(404, nil, NoSuchUploadErrorDump) // :-( + testServer.Response(200, nil, ListPartsResultDump2) + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + parts, err := multi.ListParts() + c.Assert(err, IsNil) + c.Assert(parts, HasLen, 3) + c.Assert(parts[0].N, Equals, 1) + c.Assert(parts[0].Size, Equals, int64(5)) + c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) + c.Assert(parts[1].N, Equals, 2) + c.Assert(parts[1].Size, Equals, int64(5)) + 
c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`) + c.Assert(parts[2].N, Equals, 3) + c.Assert(parts[2].Size, Equals, int64(5)) + c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`) + testServer.WaitRequest() + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") + c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"}) + + testServer.WaitRequest() // The internal error. + req = testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") + c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"}) + c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"}) +} + +func (s *S) TestPutPart(c *C) { + headers := map[string]string{ + "ETag": `"26f90efd10d614f100252ff56d88dad8"`, + } + testServer.Response(200, nil, InitMultiResultDump) + testServer.Response(200, headers, "") + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + part, err := multi.PutPart(1, strings.NewReader("")) + c.Assert(err, IsNil) + c.Assert(part.N, Equals, 1) + c.Assert(part.Size, Equals, int64(8)) + c.Assert(part.ETag, Equals, headers["ETag"]) + + testServer.WaitRequest() + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") + c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"}) + c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="}) +} + +func readAll(r io.Reader) string { + data, err := ioutil.ReadAll(r) + if err != nil { + panic(err) + } + return string(data) +} + +func 
(s *S) TestPutAllNoPreviousUpload(c *C) { + // Don't retry the NoSuchUpload error. + s.DisableRetries() + + etag1 := map[string]string{"ETag": `"etag1"`} + etag2 := map[string]string{"ETag": `"etag2"`} + etag3 := map[string]string{"ETag": `"etag3"`} + testServer.Response(200, nil, InitMultiResultDump) + testServer.Response(404, nil, NoSuchUploadErrorDump) + testServer.Response(200, etag1, "") + testServer.Response(200, etag2, "") + testServer.Response(200, etag3, "") + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5) + c.Assert(parts, HasLen, 3) + c.Assert(parts[0].ETag, Equals, `"etag1"`) + c.Assert(parts[1].ETag, Equals, `"etag2"`) + c.Assert(parts[2].ETag, Equals, `"etag3"`) + c.Assert(err, IsNil) + + // Init + testServer.WaitRequest() + + // List old parts. Won't find anything. + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/multi") + + // Send part 1. + req = testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) + c.Assert(readAll(req.Body), Equals, "part1") + + // Send part 2. + req = testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) + c.Assert(readAll(req.Body), Equals, "part2") + + // Send part 3 with shorter body. 
+ req = testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"}) + c.Assert(readAll(req.Body), Equals, "last") +} + +func (s *S) TestPutAllZeroSizeFile(c *C) { + // Don't retry the NoSuchUpload error. + s.DisableRetries() + + etag1 := map[string]string{"ETag": `"etag1"`} + testServer.Response(200, nil, InitMultiResultDump) + testServer.Response(404, nil, NoSuchUploadErrorDump) + testServer.Response(200, etag1, "") + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + // Must send at least one part, so that completing it will work. + parts, err := multi.PutAll(strings.NewReader(""), 5) + c.Assert(parts, HasLen, 1) + c.Assert(parts[0].ETag, Equals, `"etag1"`) + c.Assert(err, IsNil) + + // Init + testServer.WaitRequest() + + // List old parts. Won't find anything. + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/multi") + + // Send empty part. + req = testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"}) + c.Assert(readAll(req.Body), Equals, "") +} + +func (s *S) TestPutAllResume(c *C) { + etag2 := map[string]string{"ETag": `"etag2"`} + testServer.Response(200, nil, InitMultiResultDump) + testServer.Response(200, nil, ListPartsResultDump1) + testServer.Response(200, nil, ListPartsResultDump2) + testServer.Response(200, etag2, "") + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + // "part1" and "part3" match the checksums in ResultDump1. + // The middle one is a mismatch (it refers to "part2"). 
+ parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5) + c.Assert(parts, HasLen, 3) + c.Assert(parts[0].N, Equals, 1) + c.Assert(parts[0].Size, Equals, int64(5)) + c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) + c.Assert(parts[1].N, Equals, 2) + c.Assert(parts[1].Size, Equals, int64(5)) + c.Assert(parts[1].ETag, Equals, `"etag2"`) + c.Assert(parts[2].N, Equals, 3) + c.Assert(parts[2].Size, Equals, int64(5)) + c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`) + c.Assert(err, IsNil) + + // Init + testServer.WaitRequest() + + // List old parts, broken in two requests. + for i := 0; i < 2; i++ { + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/multi") + } + + // Send part 2, as it didn't match the checksum. + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) + c.Assert(readAll(req.Body), Equals, "partX") +} + +func (s *S) TestMultiComplete(c *C) { + testServer.Response(200, nil, InitMultiResultDump) + // Note the 200 response. Completing will hold the connection on some + // kind of long poll, and may return a late error even after a 200. 
+ testServer.Response(200, nil, InternalErrorDump) + testServer.Response(200, nil, "") + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}}) + c.Assert(err, IsNil) + + testServer.WaitRequest() + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "POST") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") + + var payload struct { + XMLName xml.Name + Part []struct { + PartNumber int + ETag string + } + } + + dec := xml.NewDecoder(req.Body) + err = dec.Decode(&payload) + c.Assert(err, IsNil) + + c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload") + c.Assert(len(payload.Part), Equals, 2) + c.Assert(payload.Part[0].PartNumber, Equals, 1) + c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`) + c.Assert(payload.Part[1].PartNumber, Equals, 2) + c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`) +} + +func (s *S) TestMultiAbort(c *C) { + testServer.Response(200, nil, InitMultiResultDump) + testServer.Response(200, nil, "") + + b := s.s3.Bucket("sample") + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + + err = multi.Abort() + c.Assert(err, IsNil) + + testServer.WaitRequest() + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "DELETE") + c.Assert(req.URL.Path, Equals, "/sample/multi") + c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") +} + +func (s *S) TestListMulti(c *C) { + testServer.Response(200, nil, ListMultiResultDump) + + b := s.s3.Bucket("sample") + + multis, prefixes, err := b.ListMulti("", "/") + c.Assert(err, IsNil) + c.Assert(prefixes, DeepEquals, []string{"a/", "b/"}) + c.Assert(multis, HasLen, 2) + c.Assert(multis[0].Key, Equals, "multi1") + c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD") + c.Assert(multis[1].Key, Equals, "multi2") + 
c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi") + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/sample/") + c.Assert(req.Form["uploads"], DeepEquals, []string{""}) + c.Assert(req.Form["prefix"], DeepEquals, []string{""}) + c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"}) + c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"}) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go new file mode 100644 index 000000000..414ede0a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/responses_test.go @@ -0,0 +1,202 @@ +package s3_test + +var GetObjectErrorDump = ` + + + NoSuchBucket + The specified bucket does not exist + non-existent-bucket + 3F1B667FAD71C3D8 + L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D + +` + +var GetListResultDump1 = ` + + + quotes + N + false + + Nelson + 2006-01-01T12:00:00.000Z + "828ef3fdfa96f00ad9f27c383fc9ac7f" + 5 + STANDARD + + bcaf161ca5fb16fd081034f + webfile + + + + Neo + 2006-01-01T12:00:00.000Z + "828ef3fdfa96f00ad9f27c383fc9ac7f" + 4 + STANDARD + + bcaf1ffd86a5fb16fd081034f + webfile + + + +` + +var GetListResultDump2 = ` + + example-bucket + photos/2006/ + some-marker + 1000 + / + false + + + photos/2006/feb/ + + + photos/2006/jan/ + + +` + +var InitMultiResultDump = ` + + + sample + multi + JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ-- + +` + +var ListPartsResultDump1 = ` + + + sample + multi + JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ-- + + bb5c0f63b0b25f2d099c + joe + + + bb5c0f63b0b25f2d099c + joe + + STANDARD + 0 + 2 + 2 + true + + 1 + 2013-01-30T13:45:51.000Z + "ffc88b4ca90a355f8ddba6b2c3b2af5c" + 5 + + + 2 + 2013-01-30T13:45:52.000Z + "d067a0fa9dc61a6e7195ca99696b5a89" + 5 + + +` + +var ListPartsResultDump2 = ` + + + 
sample + multi + JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ-- + + bb5c0f63b0b25f2d099c + joe + + + bb5c0f63b0b25f2d099c + joe + + STANDARD + 2 + 3 + 2 + false + + 3 + 2013-01-30T13:46:50.000Z + "49dcd91231f801159e893fb5c6674985" + 5 + + +` + +var ListMultiResultDump = ` + + + goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a + + + multi1 + iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ-- + / + 1000 + false + + multi1 + iUVug89pPvSswrikD + + bb5c0f63b0b25f2d0 + gustavoniemeyer + + + bb5c0f63b0b25f2d0 + gustavoniemeyer + + STANDARD + 2013-01-30T18:15:47.000Z + + + multi2 + DkirwsSvPp98guVUi + + bb5c0f63b0b25f2d0 + joe + + + bb5c0f63b0b25f2d0 + joe + + STANDARD + 2013-01-30T18:15:47.000Z + + + a/ + + + b/ + + +` + +var NoSuchUploadErrorDump = ` + + + NoSuchUpload + Not relevant + sample + 3F1B667FAD71C3D8 + kjhwqk + +` + +var InternalErrorDump = ` + + + InternalError + Not relevant + sample + 3F1B667FAD71C3D8 + kjhwqk + +` diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go new file mode 100644 index 000000000..88ef975d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3.go @@ -0,0 +1,1151 @@ +// +// goamz - Go packages to interact with the Amazon Web Services. +// +// https://wiki.ubuntu.com/goamz +// +// Copyright (c) 2011 Canonical Ltd. +// +// Written by Gustavo Niemeyer +// + +package s3 + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/xml" + "fmt" + "github.com/goamz/goamz/aws" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strconv" + "strings" + "time" +) + +const debug = false + +// The S3 type encapsulates operations with an S3 region. +type S3 struct { + aws.Auth + aws.Region + + // ConnectTimeout is the maximum time a request attempt will + // wait for a successful connection to be made. 
+ // + // A value of zero means no timeout. + ConnectTimeout time.Duration + + // ReadTimeout is the maximum time a request attempt will wait + // for an individual read to complete. + // + // A value of zero means no timeout. + ReadTimeout time.Duration + + // WriteTimeout is the maximum time a request attempt will + // wait for an individual write to complete. + // + // A value of zero means no timeout. + WriteTimeout time.Duration + + // RequestTimeout is the maximum time a request attempt can + // take before operations return a timeout error. + // + // This includes connection time, any redirects, and reading + // the response body. The timer remains running after the request + // is made so it can interrupt reading of the response data. + // + // A Timeout of zero means no timeout. + RequestTimeout time.Duration + + // AttemptStrategy is the attempt strategy used for requests. + aws.AttemptStrategy + + // Reserve the right of using private data. + private byte + + // client used for requests + client *http.Client +} + +// The Bucket type encapsulates operations with an S3 bucket. +type Bucket struct { + *S3 + Name string +} + +// The Owner type represents the owner of the object in an S3 bucket. +type Owner struct { + ID string + DisplayName string +} + +// Fold options into an Options struct +// +type Options struct { + SSE bool + Meta map[string][]string + ContentEncoding string + CacheControl string + RedirectLocation string + ContentMD5 string + // What else? + // Content-Disposition string + //// The following become headers so they are []strings rather than strings... I think + // x-amz-storage-class []string +} + +type CopyOptions struct { + Options + MetadataDirective string + ContentType string +} + +// CopyObjectResult is the output from a Copy request +type CopyObjectResult struct { + ETag string + LastModified string +} + +// DefaultAttemptStrategy is the default AttemptStrategy used by S3 objects created by New. 
+var DefaultAttemptStrategy = aws.AttemptStrategy{ + Min: 5, + Total: 5 * time.Second, + Delay: 200 * time.Millisecond, +} + +// New creates a new S3. Optional client argument allows for custom http.clients to be used. +func New(auth aws.Auth, region aws.Region, client ...*http.Client) *S3 { + + var httpclient *http.Client + + if len(client) > 0 { + httpclient = client[0] + } + + return &S3{Auth: auth, Region: region, AttemptStrategy: DefaultAttemptStrategy, client: httpclient} +} + +// Bucket returns a Bucket with the given name. +func (s3 *S3) Bucket(name string) *Bucket { + if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { + name = strings.ToLower(name) + } + return &Bucket{s3, name} +} + +var createBucketConfiguration = ` + %s +` + +// locationConstraint returns an io.Reader specifying a LocationConstraint if +// required for the region. +// +// See http://goo.gl/bh9Kq for details. +func (s3 *S3) locationConstraint() io.Reader { + constraint := "" + if s3.Region.S3LocationConstraint { + constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) + } + return strings.NewReader(constraint) +} + +type ACL string + +const ( + Private = ACL("private") + PublicRead = ACL("public-read") + PublicReadWrite = ACL("public-read-write") + AuthenticatedRead = ACL("authenticated-read") + BucketOwnerRead = ACL("bucket-owner-read") + BucketOwnerFull = ACL("bucket-owner-full-control") +) + +// PutBucket creates a new bucket. +// +// See http://goo.gl/ndjnR for details. +func (b *Bucket) PutBucket(perm ACL) error { + headers := map[string][]string{ + "x-amz-acl": {string(perm)}, + } + req := &request{ + method: "PUT", + bucket: b.Name, + path: "/", + headers: headers, + payload: b.locationConstraint(), + } + return b.S3.query(req, nil) +} + +// DelBucket removes an existing S3 bucket. All objects in the bucket must +// be removed before the bucket itself can be removed. +// +// See http://goo.gl/GoBrY for details. 
func (b *Bucket) DelBucket() (err error) {
	req := &request{
		method: "DELETE",
		bucket: b.Name,
		path:   "/",
	}
	// Retry transient failures; stop on success or a permanent error.
	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
		err = b.S3.query(req, nil)
		if !shouldRetry(err) {
			break
		}
	}
	return err
}

// Get retrieves an object from an S3 bucket.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
	body, err := b.GetReader(path)
	// The deferred close covers both the success and error paths; body
	// may be nil when GetReader failed before a response was obtained.
	defer func() {
		if body != nil {
			body.Close()
		}
	}()
	if err != nil {
		return nil, err
	}
	data, err = ioutil.ReadAll(body)
	return data, err
}

// GetReader retrieves an object from an S3 bucket,
// returning the body of the HTTP response.
// It is the caller's responsibility to call Close on rc when
// finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
	resp, err := b.GetResponse(path)
	if resp != nil {
		// Note: when resp is non-nil the body is returned even
		// alongside a non-nil err, so the caller can still close it.
		return resp.Body, err
	}
	return nil, err
}

// GetResponse retrieves an object from an S3 bucket,
// returning the HTTP response.
// It is the caller's responsibility to call Close on the response body
// when finished reading.
func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
	return b.GetResponseWithHeaders(path, make(http.Header))
}

// GetResponseWithHeaders retrieves an object from an S3 bucket,
// sending the custom headers given as the second parameter and
// returning the body of the HTTP response.
+// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) { + req := &request{ + bucket: b.Name, + path: path, + headers: headers, + } + err = b.S3.prepare(req) + if err != nil { + return nil, err + } + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + resp, err := b.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, nil + } + panic("unreachable") +} + +// Exists checks whether or not an object exists on an S3 bucket using a HEAD request. +func (b *Bucket) Exists(path string) (exists bool, err error) { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + } + err = b.S3.prepare(req) + if err != nil { + return + } + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + resp, err := b.S3.run(req, nil) + + if shouldRetry(err) && attempt.HasNext() { + continue + } + + if err != nil { + // We can treat a 403 or 404 as non existance + if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { + return false, nil + } + return false, err + } + + if resp.StatusCode/100 == 2 { + exists = true + } + return exists, err + } + return false, fmt.Errorf("S3 Currently Unreachable") +} + +// Head HEADs an object in the S3 bucket, returns the response with +// no body see http://bit.ly/17K1ylI +func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + headers: headers, + } + err := b.S3.prepare(req) + if err != nil { + return nil, err + } + + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + resp, err := b.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, err + } + return nil, fmt.Errorf("S3 Currently 
Unreachable") +} + +// Put inserts an object into the S3 bucket. +// +// See http://goo.gl/FEBPD for details. +func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { + body := bytes.NewBuffer(data) + return b.PutReader(path, body, int64(len(data)), contType, perm, options) +} + +// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key +func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { + headers := map[string][]string{ + "x-amz-acl": {string(perm)}, + "x-amz-copy-source": {source}, + } + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + } + resp := &CopyObjectResult{} + err := b.S3.query(req, resp) + if err != nil { + return resp, err + } + return resp, nil +} + +/* +PutHeader - like Put, inserts an object into the S3 bucket. +Instead of Content-Type string, pass in custom headers to override defaults. +*/ +func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error { + body := bytes.NewBuffer(data) + return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm) +} + +// PutReader inserts an object into the S3 bucket by consuming data +// from r until EOF. +func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + "Content-Type": {contType}, + "x-amz-acl": {string(perm)}, + } + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + payload: r, + } + return b.S3.query(req, nil) +} + +/* +PutReaderHeader - like PutReader, inserts an object into S3 from a reader. +Instead of Content-Type string, pass in custom headers to override defaults. 
+*/ +func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error { + // Default headers + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + "Content-Type": {"application/text"}, + "x-amz-acl": {string(perm)}, + } + + // Override with custom headers + for key, value := range customHeaders { + headers[key] = value + } + + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + payload: r, + } + return b.S3.query(req, nil) +} + +// addHeaders adds o's specified fields to headers +func (o Options) addHeaders(headers map[string][]string) { + if o.SSE { + headers["x-amz-server-side-encryption"] = []string{"AES256"} + } + if len(o.ContentEncoding) != 0 { + headers["Content-Encoding"] = []string{o.ContentEncoding} + } + if len(o.CacheControl) != 0 { + headers["Cache-Control"] = []string{o.CacheControl} + } + if len(o.ContentMD5) != 0 { + headers["Content-MD5"] = []string{o.ContentMD5} + } + if len(o.RedirectLocation) != 0 { + headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation} + } + for k, v := range o.Meta { + headers["x-amz-meta-"+k] = v + } +} + +// addHeaders adds o's specified fields to headers +func (o CopyOptions) addHeaders(headers map[string][]string) { + o.Options.addHeaders(headers) + if len(o.MetadataDirective) != 0 { + headers["x-amz-metadata-directive"] = []string{o.MetadataDirective} + } + if len(o.ContentType) != 0 { + headers["Content-Type"] = []string{o.ContentType} + } +} + +func makeXmlBuffer(doc []byte) *bytes.Buffer { + buf := new(bytes.Buffer) + buf.WriteString(xml.Header) + buf.Write(doc) + return buf +} + +type RoutingRule struct { + ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` + RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` + RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` +} + +type WebsiteConfiguration struct { 
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"` + IndexDocumentSuffix string `xml:"IndexDocument>Suffix"` + ErrorDocumentKey string `xml:"ErrorDocument>Key"` + RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` +} + +func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { + + doc, err := xml.Marshal(configuration) + if err != nil { + return err + } + + buf := makeXmlBuffer(doc) + + return b.PutBucketSubresource("website", buf, int64(buf.Len())) +} + +func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + } + req := &request{ + path: "/", + method: "PUT", + bucket: b.Name, + headers: headers, + payload: r, + params: url.Values{subresource: {""}}, + } + + return b.S3.query(req, nil) +} + +// Del removes an object from the S3 bucket. +// +// See http://goo.gl/APeTt for details. +func (b *Bucket) Del(path string) error { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: path, + } + return b.S3.query(req, nil) +} + +type Delete struct { + Quiet bool `xml:"Quiet,omitempty"` + Objects []Object `xml:"Object"` +} + +type Object struct { + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DelMulti removes up to 1000 objects from the S3 bucket. +// +// See http://goo.gl/jx6cWK for details. 
+func (b *Bucket) DelMulti(objects Delete) error { + doc, err := xml.Marshal(objects) + if err != nil { + return err + } + + buf := makeXmlBuffer(doc) + digest := md5.New() + size, err := digest.Write(buf.Bytes()) + if err != nil { + return err + } + + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(int64(size), 10)}, + "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, + "Content-Type": {"text/xml"}, + } + req := &request{ + path: "/", + method: "POST", + params: url.Values{"delete": {""}}, + bucket: b.Name, + headers: headers, + payload: buf, + } + + return b.S3.query(req, nil) +} + +// The ListResp type holds the results of a List bucket operation. +type ListResp struct { + Name string + Prefix string + Delimiter string + Marker string + NextMarker string + MaxKeys int + + // IsTruncated is true if the results have been truncated because + // there are more keys and prefixes than can fit in MaxKeys. + // N.B. this is the opposite sense to that documented (incorrectly) in + // http://goo.gl/YjQTc + IsTruncated bool + Contents []Key + CommonPrefixes []string `xml:">Prefix"` +} + +// The Key type represents an item stored in an S3 bucket. +type Key struct { + Key string + LastModified string + Size int64 + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + StorageClass string + Owner Owner +} + +// List returns information about objects in an S3 bucket. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// The marker parameter specifies the key to start with when listing objects +// in a bucket. 
Amazon S3 lists objects in alphabetical order and +// will return keys alphabetically greater than the marker. +// +// The max parameter specifies how many keys + common prefixes to return in +// the response. The default is 1000. +// +// For example, given these keys in a bucket: +// +// index.html +// index2.html +// photos/2006/January/sample.jpg +// photos/2006/February/sample2.jpg +// photos/2006/February/sample3.jpg +// photos/2006/February/sample4.jpg +// +// Listing this bucket with delimiter set to "/" would yield the +// following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Contents: []Key{ +// {Key: "index.html", "index2.html"}, +// }, +// CommonPrefixes: []string{ +// "photos/", +// }, +// } +// +// Listing the same bucket with delimiter set to "/" and prefix set to +// "photos/2006/" would yield the following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Prefix: "photos/2006/", +// CommonPrefixes: []string{ +// "photos/2006/February/", +// "photos/2006/January/", +// }, +// } +// +// See http://goo.gl/YjQTc for details. +func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { + params := map[string][]string{ + "prefix": {prefix}, + "delimiter": {delim}, + "marker": {marker}, + } + if max != 0 { + params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} + } + req := &request{ + bucket: b.Name, + params: params, + } + result = &ListResp{} + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + err = b.S3.query(req, result) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return result, nil +} + +// The VersionsResp type holds the results of a list bucket Versions operation. 
+type VersionsResp struct { + Name string + Prefix string + KeyMarker string + VersionIdMarker string + MaxKeys int + Delimiter string + IsTruncated bool + Versions []Version + CommonPrefixes []string `xml:">Prefix"` +} + +// The Version type represents an object version stored in an S3 bucket. +type Version struct { + Key string + VersionId string + IsLatest bool + LastModified string + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + Size int64 + Owner Owner + StorageClass string +} + +func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { + params := map[string][]string{ + "versions": {""}, + "prefix": {prefix}, + "delimiter": {delim}, + } + + if len(versionIdMarker) != 0 { + params["version-id-marker"] = []string{versionIdMarker} + } + if len(keyMarker) != 0 { + params["key-marker"] = []string{keyMarker} + } + + if max != 0 { + params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} + } + req := &request{ + bucket: b.Name, + params: params, + } + result = &VersionsResp{} + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + err = b.S3.query(req, result) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return result, nil +} + +// Returns a mapping of all key names in this bucket to Key objects +func (b *Bucket) GetBucketContents() (*map[string]Key, error) { + bucket_contents := map[string]Key{} + prefix := "" + path_separator := "" + marker := "" + for { + contents, err := b.List(prefix, path_separator, marker, 1000) + if err != nil { + return &bucket_contents, err + } + for _, key := range contents.Contents { + bucket_contents[key.Key] = key + } + if contents.IsTruncated { + marker = contents.NextMarker + } else { + break + } + } + + return &bucket_contents, nil +} + +// URL returns a non-signed URL that allows retrieving the +// object at path.
It only works if the object is publicly +// readable (see SignedURL). +func (b *Bucket) URL(path string) string { + req := &request{ + bucket: b.Name, + path: path, + } + err := b.S3.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + u.RawQuery = "" + return u.String() +} + +// SignedURL returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. +func (b *Bucket) SignedURL(path string, expires time.Time) string { + req := &request{ + bucket: b.Name, + path: path, + params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}}, + } + err := b.S3.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + if b.S3.Auth.Token() != "" { + return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0]) + } else { + return u.String() + } +} + +// UploadSignedURL returns a signed URL that allows anyone holding the URL +// to upload the object at path. The signature is valid until expires. 
+ +// contenttype is a string like image/png +// path is the resource name in s3 terminology like images/ali.png [obviously excluding the bucket name itself] +func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string { + expire_date := expires.Unix() + if method != "POST" { + method = "PUT" + } + stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n/" + b.Name + "/" + path + fmt.Println("String to sign:\n", stringToSign) + a := b.S3.Auth + secretKey := a.SecretKey + accessId := a.AccessKey + mac := hmac.New(sha1.New, []byte(secretKey)) + mac.Write([]byte(stringToSign)) + macsum := mac.Sum(nil) + signature := base64.StdEncoding.EncodeToString([]byte(macsum)) + signature = strings.TrimSpace(signature) + + signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/") + if err != nil { + log.Println("ERROR sining url for S3 upload", err) + return "" + } + signedurl.Path += path + params := url.Values{} + params.Add("AWSAccessKeyId", accessId) + params.Add("Expires", strconv.FormatInt(expire_date, 10)) + params.Add("Signature", signature) + if a.Token() != "" { + params.Add("token", a.Token()) + } + + signedurl.RawQuery = params.Encode() + return signedurl.String() +} + +// PostFormArgs returns the action and input fields needed to allow anonymous +// uploads to a bucket within the expiration limit +func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { + conditions := make([]string, 0) + fields = map[string]string{ + "AWSAccessKeyId": b.Auth.AccessKey, + "key": path, + } + + conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) + conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) + if redirect != "" { + conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) + fields["success_action_redirect"] = redirect + } + + vExpiration := 
expires.Format("2006-01-02T15:04:05Z") + vConditions := strings.Join(conditions, ",") + policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) + policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) + fields["policy"] = policy64 + + signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey)) + signer.Write([]byte(policy64)) + fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) + + action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name) + return +} + +type request struct { + method string + bucket string + path string + signpath string + params url.Values + headers http.Header + baseurl string + payload io.Reader + prepared bool +} + +func (req *request) url() (*url.URL, error) { + u, err := url.Parse(req.baseurl) + if err != nil { + return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) + } + u.RawQuery = req.params.Encode() + u.Path = req.path + return u, nil +} + +// query prepares and runs the req request. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (s3 *S3) query(req *request, resp interface{}) error { + err := s3.prepare(req) + if err == nil { + var httpResponse *http.Response + httpResponse, err = s3.run(req, resp) + if resp == nil && httpResponse != nil { + httpResponse.Body.Close() + } + } + return err +} + +// prepare sets up req to be delivered to S3. +func (s3 *S3) prepare(req *request) error { + var signpath = req.path + + if !req.prepared { + req.prepared = true + if req.method == "" { + req.method = "GET" + } + // Copy so they can be mutated without affecting retries. 
+ params := make(url.Values) + headers := make(http.Header) + for k, v := range req.params { + params[k] = v + } + for k, v := range req.headers { + headers[k] = v + } + req.params = params + req.headers = headers + if !strings.HasPrefix(req.path, "/") { + req.path = "/" + req.path + } + signpath = req.path + if req.bucket != "" { + req.baseurl = s3.Region.S3BucketEndpoint + if req.baseurl == "" { + // Use the path method to address the bucket. + req.baseurl = s3.Region.S3Endpoint + req.path = "/" + req.bucket + req.path + } else { + // Just in case, prevent injection. + if strings.IndexAny(req.bucket, "/:@") >= 0 { + return fmt.Errorf("bad S3 bucket: %q", req.bucket) + } + req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1) + } + signpath = "/" + req.bucket + signpath + } + } + + // Always sign again as it's not clear how far the + // server has handled a previous attempt. + u, err := url.Parse(req.baseurl) + if err != nil { + return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) + } + reqSignpathSpaceFix := (&url.URL{Path: signpath}).String() + req.headers["Host"] = []string{u.Host} + req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)} + if s3.Auth.Token() != "" { + req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()} + } + sign(s3.Auth, req.method, reqSignpathSpaceFix, req.params, req.headers) + return nil +} + +// run sends req and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. 
+func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) { + if debug { + log.Printf("Running S3 request: %#v", req) + } + + u, err := req.url() + if err != nil { + return nil, err + } + + hreq := http.Request{ + URL: u, + Method: req.method, + ProtoMajor: 1, + ProtoMinor: 1, + Close: true, + Header: req.headers, + } + + if v, ok := req.headers["Content-Length"]; ok { + hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) + delete(req.headers, "Content-Length") + } + if req.payload != nil { + hreq.Body = ioutil.NopCloser(req.payload) + } + + if s3.client == nil { + s3.client = &http.Client{ + Transport: &http.Transport{ + Dial: func(netw, addr string) (c net.Conn, err error) { + c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout) + if err != nil { + return + } + + var deadline time.Time + if s3.RequestTimeout > 0 { + deadline = time.Now().Add(s3.RequestTimeout) + c.SetDeadline(deadline) + } + + if s3.ReadTimeout > 0 || s3.WriteTimeout > 0 { + c = &ioTimeoutConn{ + TCPConn: c.(*net.TCPConn), + readTimeout: s3.ReadTimeout, + writeTimeout: s3.WriteTimeout, + requestDeadline: deadline, + } + } + return + }, + }, + } + } + + hresp, err := s3.client.Do(&hreq) + if err != nil { + return nil, err + } + if debug { + dump, _ := httputil.DumpResponse(hresp, true) + log.Printf("} -> %s\n", dump) + } + if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { + defer hresp.Body.Close() + return nil, buildError(hresp) + } + if resp != nil { + err = xml.NewDecoder(hresp.Body).Decode(resp) + hresp.Body.Close() + if debug { + log.Printf("goamz.s3> decoded xml into %#v", resp) + } + } + return hresp, err +} + +// Error represents an error in an operation with S3. +type Error struct { + StatusCode int // HTTP status code (200, 403, ...) + Code string // EC2 error code ("UnsupportedOperation", ...) 
+ Message string // The human-oriented error message + BucketName string + RequestId string + HostId string +} + +func (e *Error) Error() string { + return e.Message +} + +func buildError(r *http.Response) error { + if debug { + log.Printf("got error (status code %v)", r.StatusCode) + data, err := ioutil.ReadAll(r.Body) + if err != nil { + log.Printf("\tread error: %v", err) + } else { + log.Printf("\tdata:\n%s\n\n", data) + } + r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + } + + err := Error{} + // TODO return error if Unmarshal fails? + xml.NewDecoder(r.Body).Decode(&err) + r.Body.Close() + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + if debug { + log.Printf("err: %#v\n", err) + } + return &err +} + +func shouldRetry(err error) bool { + if err == nil { + return false + } + if e, ok := err.(*url.Error); ok { + // Transport returns this string if it detects a write on a connection which + // has already had an error + if e.Err.Error() == "http: can't write HTTP request on broken connection" { + return true + } + err = e.Err + } + + switch err { + case io.ErrUnexpectedEOF, io.EOF: + return true + } + switch e := err.(type) { + case *net.DNSError: + return true + case *net.OpError: + switch e.Op { + case "read", "write", "WSARecv", "WSASend", "ConnectEx": + return true + } + case *Error: + switch e.Code { + case "InternalError", "NoSuchUpload", "NoSuchBucket": + return true + } + } + return false +} + +func hasCode(err error, code string) bool { + s3err, ok := err.(*Error) + return ok && s3err.Code == code +} + +// ioTimeoutConn is a net.Conn which sets a deadline for each Read or Write operation +type ioTimeoutConn struct { + *net.TCPConn + readTimeout time.Duration + writeTimeout time.Duration + requestDeadline time.Time +} + +func (c *ioTimeoutConn) deadline(timeout time.Duration) time.Time { + dl := time.Now().Add(timeout) + if c.requestDeadline.IsZero() || dl.Before(c.requestDeadline) { + return dl + } + + return 
c.requestDeadline +} + +func (c *ioTimeoutConn) Read(b []byte) (int, error) { + if c.readTimeout > 0 { + err := c.TCPConn.SetReadDeadline(c.deadline(c.readTimeout)) + if err != nil { + return 0, err + } + } + return c.TCPConn.Read(b) +} + +func (c *ioTimeoutConn) Write(b []byte) (int, error) { + if c.writeTimeout > 0 { + err := c.TCPConn.SetWriteDeadline(c.deadline(c.writeTimeout)) + if err != nil { + return 0, err + } + } + return c.TCPConn.Write(b) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go new file mode 100644 index 000000000..24d4dfcc0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3_test.go @@ -0,0 +1,427 @@ +package s3_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/goamz/goamz/aws" + "github.com/goamz/goamz/s3" + "github.com/goamz/goamz/testutil" + . "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + TestingT(t) +} + +type S struct { + s3 *s3.S3 +} + +var _ = Suite(&S{}) + +var testServer = testutil.NewHTTPServer() + +func (s *S) SetUpSuite(c *C) { + testServer.Start() + auth := aws.Auth{AccessKey: "abc", SecretKey: "123"} + s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL}) +} + +func (s *S) TearDownSuite(c *C) { + s.s3.AttemptStrategy = s3.DefaultAttemptStrategy +} + +func (s *S) SetUpTest(c *C) { + s.s3.AttemptStrategy = aws.AttemptStrategy{ + Total: 300 * time.Millisecond, + Delay: 100 * time.Millisecond, + } +} + +func (s *S) TearDownTest(c *C) { + testServer.Flush() +} + +func (s *S) DisableRetries() { + s.s3.AttemptStrategy = aws.AttemptStrategy{} +} + +// PutBucket docs: http://goo.gl/kBTCu + +func (s *S) TestPutBucket(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + err := b.PutBucket(s3.Private) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, 
"/bucket/") + c.Assert(req.Header["Date"], Not(Equals), "") +} + +// Head docs: http://bit.ly/17K1ylI + +func (s *S) TestHead(c *C) { + testServer.Response(200, nil, "content") + + b := s.s3.Bucket("bucket") + resp, err := b.Head("name", nil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "HEAD") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(Equals), "") + + c.Assert(err, IsNil) + c.Assert(resp.ContentLength, FitsTypeOf, int64(0)) + c.Assert(resp, FitsTypeOf, &http.Response{}) +} + +// DeleteBucket docs: http://goo.gl/GoBrY + +func (s *S) TestDelBucket(c *C) { + testServer.Response(204, nil, "") + + b := s.s3.Bucket("bucket") + err := b.DelBucket() + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "DELETE") + c.Assert(req.URL.Path, Equals, "/bucket/") + c.Assert(req.Header["Date"], Not(Equals), "") +} + +// GetObject docs: http://goo.gl/isCO7 + +func (s *S) TestGet(c *C) { + testServer.Response(200, nil, "content") + + b := s.s3.Bucket("bucket") + data, err := b.Get("name") + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(Equals), "") + + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "content") +} + +func (s *S) TestURL(c *C) { + testServer.Response(200, nil, "content") + + b := s.s3.Bucket("bucket") + url := b.URL("name") + r, err := http.Get(url) + c.Assert(err, IsNil) + data, err := ioutil.ReadAll(r.Body) + r.Body.Close() + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "content") + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/bucket/name") +} + +func (s *S) TestGetReader(c *C) { + testServer.Response(200, nil, "content") + + b := s.s3.Bucket("bucket") + rc, err := b.GetReader("name") + c.Assert(err, IsNil) + data, err := ioutil.ReadAll(rc) + rc.Close() + c.Assert(err, IsNil) + 
c.Assert(string(data), Equals, "content") + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(Equals), "") +} + +func (s *S) TestGetNotFound(c *C) { + for i := 0; i < 10; i++ { + testServer.Response(404, nil, GetObjectErrorDump) + } + + b := s.s3.Bucket("non-existent-bucket") + data, err := b.Get("non-existent") + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent") + c.Assert(req.Header["Date"], Not(Equals), "") + + s3err, _ := err.(*s3.Error) + c.Assert(s3err, NotNil) + c.Assert(s3err.StatusCode, Equals, 404) + c.Assert(s3err.BucketName, Equals, "non-existent-bucket") + c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8") + c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D") + c.Assert(s3err.Code, Equals, "NoSuchBucket") + c.Assert(s3err.Message, Equals, "The specified bucket does not exist") + c.Assert(s3err.Error(), Equals, "The specified bucket does not exist") + c.Assert(data, IsNil) +} + +// PutObject docs: http://goo.gl/FEBPD + +func (s *S) TestPutObject(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{}) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) + c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) + //c.Assert(req.Header["Content-MD5"], DeepEquals, "...") + c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) +} + +func (s *S) TestPutObjectReadTimeout(c *C) { + s.s3.ReadTimeout = 50 * time.Millisecond + defer func() { + s.s3.ReadTimeout = 0 + }() 
+ + b := s.s3.Bucket("bucket") + err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{}) + + // Make sure that we get a timeout error. + c.Assert(err, NotNil) + + // Set the response after the request times out so that the next request will work. + testServer.Response(200, nil, "") + + // This time set the response within our timeout period so that we expect the call + // to return successfully. + go func() { + time.Sleep(25 * time.Millisecond) + testServer.Response(200, nil, "") + }() + err = b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{}) + c.Assert(err, IsNil) +} + +func (s *S) TestPutObjectHeader(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + err := b.PutHeader( + "name", + []byte("content"), + map[string][]string{"Content-Type": {"content-type"}}, + s3.Private, + ) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) + c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) + //c.Assert(req.Header["Content-MD5"], DeepEquals, "...") + c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) +} + +func (s *S) TestPutReader(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + buf := bytes.NewBufferString("content") + err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private, s3.Options{}) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) + c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) + //c.Assert(req.Header["Content-MD5"], Equals, "...") + 
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) +} + +func (s *S) TestPutReaderHeader(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + buf := bytes.NewBufferString("content") + err := b.PutReaderHeader( + "name", + buf, + int64(buf.Len()), + map[string][]string{"Content-Type": {"content-type"}}, + s3.Private, + ) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "PUT") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) + c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) + c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) + //c.Assert(req.Header["Content-MD5"], Equals, "...") + c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) +} + +// DelObject docs: http://goo.gl/APeTt + +func (s *S) TestDelObject(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + err := b.Del("name") + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "DELETE") + c.Assert(req.URL.Path, Equals, "/bucket/name") + c.Assert(req.Header["Date"], Not(Equals), "") +} + +func (s *S) TestDelMultiObjects(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + objects := []s3.Object{s3.Object{Key: "test"}} + err := b.DelMulti(s3.Delete{ + Quiet: false, + Objects: objects, + }) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "POST") + c.Assert(req.URL.RawQuery, Equals, "delete=") + c.Assert(req.Header["Date"], Not(Equals), "") + c.Assert(req.Header["Content-MD5"], Not(Equals), "") + c.Assert(req.Header["Content-Type"], Not(Equals), "") + c.Assert(req.ContentLength, Not(Equals), "") +} + +// Bucket List Objects docs: http://goo.gl/YjQTc + +func (s *S) TestList(c *C) { + testServer.Response(200, nil, GetListResultDump1) + + b := s.s3.Bucket("quotes") + + data, err := b.List("N", "", 
"", 0) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/quotes/") + c.Assert(req.Header["Date"], Not(Equals), "") + c.Assert(req.Form["prefix"], DeepEquals, []string{"N"}) + c.Assert(req.Form["delimiter"], DeepEquals, []string{""}) + c.Assert(req.Form["marker"], DeepEquals, []string{""}) + c.Assert(req.Form["max-keys"], DeepEquals, []string(nil)) + + c.Assert(data.Name, Equals, "quotes") + c.Assert(data.Prefix, Equals, "N") + c.Assert(data.IsTruncated, Equals, false) + c.Assert(len(data.Contents), Equals, 2) + + c.Assert(data.Contents[0].Key, Equals, "Nelson") + c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z") + c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) + c.Assert(data.Contents[0].Size, Equals, int64(5)) + c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD") + c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f") + c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile") + + c.Assert(data.Contents[1].Key, Equals, "Neo") + c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z") + c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) + c.Assert(data.Contents[1].Size, Equals, int64(4)) + c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD") + c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f") + c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile") +} + +func (s *S) TestListWithDelimiter(c *C) { + testServer.Response(200, nil, GetListResultDump2) + + b := s.s3.Bucket("quotes") + + data, err := b.List("photos/2006/", "/", "some-marker", 1000) + c.Assert(err, IsNil) + + req := testServer.WaitRequest() + c.Assert(req.Method, Equals, "GET") + c.Assert(req.URL.Path, Equals, "/quotes/") + c.Assert(req.Header["Date"], Not(Equals), "") + c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"}) + 
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"}) + c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"}) + c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"}) + + c.Assert(data.Name, Equals, "example-bucket") + c.Assert(data.Prefix, Equals, "photos/2006/") + c.Assert(data.Delimiter, Equals, "/") + c.Assert(data.Marker, Equals, "some-marker") + c.Assert(data.IsTruncated, Equals, false) + c.Assert(len(data.Contents), Equals, 0) + c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"}) +} + +func (s *S) TestExists(c *C) { + testServer.Response(200, nil, "") + + b := s.s3.Bucket("bucket") + result, err := b.Exists("name") + + req := testServer.WaitRequest() + + c.Assert(req.Method, Equals, "HEAD") + + c.Assert(err, IsNil) + c.Assert(result, Equals, true) +} + +func (s *S) TestExistsNotFound404(c *C) { + testServer.Response(404, nil, "") + + b := s.s3.Bucket("bucket") + result, err := b.Exists("name") + + req := testServer.WaitRequest() + + c.Assert(req.Method, Equals, "HEAD") + + c.Assert(err, IsNil) + c.Assert(result, Equals, false) +} + +func (s *S) TestExistsNotFound403(c *C) { + testServer.Response(403, nil, "") + + b := s.s3.Bucket("bucket") + result, err := b.Exists("name") + + req := testServer.WaitRequest() + + c.Assert(req.Method, Equals, "HEAD") + + c.Assert(err, IsNil) + c.Assert(result, Equals, false) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go new file mode 100644 index 000000000..1b898efc4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3i_test.go @@ -0,0 +1,590 @@ +package s3_test + +import ( + "bytes" + "crypto/md5" + "fmt" + "io/ioutil" + "net" + "net/http" + "sort" + "strings" + "time" + + "github.com/goamz/goamz/aws" + "github.com/goamz/goamz/s3" + "github.com/goamz/goamz/testutil" + . "gopkg.in/check.v1" +) + +// AmazonServer represents an Amazon S3 server. 
+type AmazonServer struct { + auth aws.Auth +} + +func (s *AmazonServer) SetUp(c *C) { + auth, err := aws.EnvAuth() + if err != nil { + c.Fatal(err.Error()) + } + s.auth = auth +} + +var _ = Suite(&AmazonClientSuite{Region: aws.USEast}) +var _ = Suite(&AmazonClientSuite{Region: aws.EUWest}) +var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast}) + +// AmazonClientSuite tests the client against a live S3 server. +type AmazonClientSuite struct { + aws.Region + srv AmazonServer + ClientTests +} + +func (s *AmazonClientSuite) SetUpSuite(c *C) { + if !testutil.Amazon { + c.Skip("live tests against AWS disabled (no -amazon)") + } + s.srv.SetUp(c) + s.s3 = s3.New(s.srv.auth, s.Region) + // In case tests were interrupted in the middle before. + s.ClientTests.Cleanup() +} + +func (s *AmazonClientSuite) TearDownTest(c *C) { + s.ClientTests.Cleanup() +} + +// AmazonDomainClientSuite tests the client against a live S3 +// server using bucket names in the endpoint domain name rather +// than the request path. +type AmazonDomainClientSuite struct { + aws.Region + srv AmazonServer + ClientTests +} + +func (s *AmazonDomainClientSuite) SetUpSuite(c *C) { + if !testutil.Amazon { + c.Skip("live tests against AWS disabled (no -amazon)") + } + s.srv.SetUp(c) + region := s.Region + region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com" + s.s3 = s3.New(s.srv.auth, region) + s.ClientTests.Cleanup() +} + +func (s *AmazonDomainClientSuite) TearDownTest(c *C) { + s.ClientTests.Cleanup() +} + +// ClientTests defines integration tests designed to test the client. +// It is not used as a test suite in itself, but embedded within +// another type. +type ClientTests struct { + s3 *s3.S3 + authIsBroken bool +} + +func (s *ClientTests) Cleanup() { + killBucket(testBucket(s.s3)) +} + +func testBucket(s *s3.S3) *s3.Bucket { + // Watch out! If this function is corrupted and made to match with something + // people own, killBucket will happily remove *everything* inside the bucket. 
+ key := s.Auth.AccessKey + if len(key) >= 8 { + key = s.Auth.AccessKey[:8] + } + return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key)) +} + +var attempts = aws.AttemptStrategy{ + Min: 5, + Total: 20 * time.Second, + Delay: 100 * time.Millisecond, +} + +func killBucket(b *s3.Bucket) { + var err error + for attempt := attempts.Start(); attempt.Next(); { + err = b.DelBucket() + if err == nil { + return + } + if _, ok := err.(*net.DNSError); ok { + return + } + e, ok := err.(*s3.Error) + if ok && e.Code == "NoSuchBucket" { + return + } + if ok && e.Code == "BucketNotEmpty" { + // Errors are ignored here. Just retry. + resp, err := b.List("", "", "", 1000) + if err == nil { + for _, key := range resp.Contents { + _ = b.Del(key.Key) + } + } + multis, _, _ := b.ListMulti("", "") + for _, m := range multis { + _ = m.Abort() + } + } + } + message := "cannot delete test bucket" + if err != nil { + message += ": " + err.Error() + } + panic(message) +} + +func get(url string) ([]byte, error) { + for attempt := attempts.Start(); attempt.Next(); { + resp, err := http.Get(url) + if err != nil { + if attempt.HasNext() { + continue + } + return nil, err + } + data, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + if attempt.HasNext() { + continue + } + return nil, err + } + return data, err + } + panic("unreachable") +} + +func (s *ClientTests) TestBasicFunctionality(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.PublicRead) + c.Assert(err, IsNil) + + err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead, s3.Options{}) + c.Assert(err, IsNil) + defer b.Del("name") + + data, err := b.Get("name") + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "yo!") + + data, err = get(b.URL("name")) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "yo!") + + buf := bytes.NewBufferString("hey!") + err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private, s3.Options{}) + c.Assert(err, IsNil) + defer b.Del("name2") + 
+ rc, err := b.GetReader("name2") + c.Assert(err, IsNil) + data, err = ioutil.ReadAll(rc) + c.Check(err, IsNil) + c.Check(string(data), Equals, "hey!") + rc.Close() + + data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour))) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hey!") + + if !s.authIsBroken { + data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour))) + c.Assert(err, IsNil) + c.Assert(string(data), Matches, "(?s).*AccessDenied.*") + } + + err = b.DelBucket() + c.Assert(err, NotNil) + + s3err, ok := err.(*s3.Error) + c.Assert(ok, Equals, true) + c.Assert(s3err.Code, Equals, "BucketNotEmpty") + c.Assert(s3err.BucketName, Equals, b.Name) + c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty") + + err = b.Del("name") + c.Assert(err, IsNil) + err = b.Del("name2") + c.Assert(err, IsNil) + + err = b.DelBucket() + c.Assert(err, IsNil) +} + +func (s *ClientTests) TestGetNotFound(c *C) { + b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey) + data, err := b.Get("non-existent") + + s3err, _ := err.(*s3.Error) + c.Assert(s3err, NotNil) + c.Assert(s3err.StatusCode, Equals, 404) + c.Assert(s3err.Code, Equals, "NoSuchBucket") + c.Assert(s3err.Message, Equals, "The specified bucket does not exist") + c.Assert(data, IsNil) +} + +// Communicate with all endpoints to see if they are alive. +func (s *ClientTests) TestRegions(c *C) { + errs := make(chan error, len(aws.Regions)) + for _, region := range aws.Regions { + go func(r aws.Region) { + s := s3.New(s.s3.Auth, r) + b := s.Bucket("goamz-" + s.Auth.AccessKey) + _, err := b.Get("non-existent") + errs <- err + }(region) + } + for _ = range aws.Regions { + err := <-errs + if err != nil { + s3_err, ok := err.(*s3.Error) + if ok { + c.Check(s3_err.Code, Matches, "NoSuchBucket") + } else if _, ok = err.(*net.DNSError); ok { + // Okay as well. 
+ } else { + c.Errorf("Non-S3 error: %s", err) + } + } else { + c.Errorf("Test should have errored but it seems to have succeeded") + } + } +} + +var objectNames = []string{ + "index.html", + "index2.html", + "photos/2006/February/sample2.jpg", + "photos/2006/February/sample3.jpg", + "photos/2006/February/sample4.jpg", + "photos/2006/January/sample.jpg", + "test/bar", + "test/foo", +} + +func keys(names ...string) []s3.Key { + ks := make([]s3.Key, len(names)) + for i, name := range names { + ks[i].Key = name + } + return ks +} + +// As the ListResp specifies all the parameters to the +// request too, we use it to specify request parameters +// and expected results. The Contents field is +// used only for the key names inside it. +var listTests = []s3.ListResp{ + // normal list. + { + Contents: keys(objectNames...), + }, { + Marker: objectNames[0], + Contents: keys(objectNames[1:]...), + }, { + Marker: objectNames[0] + "a", + Contents: keys(objectNames[1:]...), + }, { + Marker: "z", + }, + + // limited results. 
+ { + MaxKeys: 2, + Contents: keys(objectNames[0:2]...), + IsTruncated: true, + }, { + MaxKeys: 2, + Marker: objectNames[0], + Contents: keys(objectNames[1:3]...), + IsTruncated: true, + }, { + MaxKeys: 2, + Marker: objectNames[len(objectNames)-2], + Contents: keys(objectNames[len(objectNames)-1:]...), + }, + + // with delimiter + { + Delimiter: "/", + CommonPrefixes: []string{"photos/", "test/"}, + Contents: keys("index.html", "index2.html"), + }, { + Delimiter: "/", + Prefix: "photos/2006/", + CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"}, + }, { + Delimiter: "/", + Prefix: "t", + CommonPrefixes: []string{"test/"}, + }, { + Delimiter: "/", + MaxKeys: 1, + Contents: keys("index.html"), + IsTruncated: true, + }, { + Delimiter: "/", + MaxKeys: 1, + Marker: "index2.html", + CommonPrefixes: []string{"photos/"}, + IsTruncated: true, + }, { + Delimiter: "/", + MaxKeys: 1, + Marker: "photos/", + CommonPrefixes: []string{"test/"}, + IsTruncated: false, + }, { + Delimiter: "Feb", + CommonPrefixes: []string{"photos/2006/Feb"}, + Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"), + }, +} + +func (s *ClientTests) TestDoublePutBucket(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.PublicRead) + c.Assert(err, IsNil) + + err = b.PutBucket(s3.PublicRead) + if err != nil { + c.Assert(err, FitsTypeOf, new(s3.Error)) + c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou") + } +} + +func (s *ClientTests) TestBucketList(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.Private) + c.Assert(err, IsNil) + + objData := make(map[string][]byte) + for i, path := range objectNames { + data := []byte(strings.Repeat("a", i)) + err := b.Put(path, data, "text/plain", s3.Private, s3.Options{}) + c.Assert(err, IsNil) + defer b.Del(path) + objData[path] = data + } + + for i, t := range listTests { + c.Logf("test %d", i) + resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys) + 
c.Assert(err, IsNil) + c.Check(resp.Name, Equals, b.Name) + c.Check(resp.Delimiter, Equals, t.Delimiter) + c.Check(resp.IsTruncated, Equals, t.IsTruncated) + c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes) + checkContents(c, resp.Contents, objData, t.Contents) + } +} + +func etag(data []byte) string { + sum := md5.New() + sum.Write(data) + return fmt.Sprintf(`"%x"`, sum.Sum(nil)) +} + +func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) { + c.Assert(contents, HasLen, len(expected)) + for i, k := range contents { + c.Check(k.Key, Equals, expected[i].Key) + // TODO mtime + c.Check(k.Size, Equals, int64(len(data[k.Key]))) + c.Check(k.ETag, Equals, etag(data[k.Key])) + } +} + +func (s *ClientTests) TestMultiInitPutList(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.Private) + c.Assert(err, IsNil) + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + c.Assert(multi.UploadId, Matches, ".+") + defer multi.Abort() + + var sent []s3.Part + + for i := 0; i < 5; i++ { + p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("", i+1))) + c.Assert(err, IsNil) + c.Assert(p.N, Equals, i+1) + c.Assert(p.Size, Equals, int64(8)) + c.Assert(p.ETag, Matches, ".+") + sent = append(sent, p) + } + + s3.SetListPartsMax(2) + + parts, err := multi.ListParts() + c.Assert(err, IsNil) + c.Assert(parts, HasLen, len(sent)) + for i := range parts { + c.Assert(parts[i].N, Equals, sent[i].N) + c.Assert(parts[i].Size, Equals, sent[i].Size) + c.Assert(parts[i].ETag, Equals, sent[i].ETag) + } + + err = multi.Complete(parts) + s3err, failed := err.(*s3.Error) + c.Assert(failed, Equals, true) + c.Assert(s3err.Code, Equals, "EntityTooSmall") + + err = multi.Abort() + c.Assert(err, IsNil) + _, err = multi.ListParts() + s3err, ok := err.(*s3.Error) + c.Assert(ok, Equals, true) + c.Assert(s3err.Code, Equals, "NoSuchUpload") +} + +// This may take a minute or more due to the minimum size accepted S3 +// on 
multipart upload parts. +func (s *ClientTests) TestMultiComplete(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.Private) + c.Assert(err, IsNil) + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + c.Assert(multi.UploadId, Matches, ".+") + defer multi.Abort() + + // Minimum size S3 accepts for all but the last part is 5MB. + data1 := make([]byte, 5*1024*1024) + data2 := []byte("") + + part1, err := multi.PutPart(1, bytes.NewReader(data1)) + c.Assert(err, IsNil) + part2, err := multi.PutPart(2, bytes.NewReader(data2)) + c.Assert(err, IsNil) + + // Purposefully reversed. The order requirement must be handled. + err = multi.Complete([]s3.Part{part2, part1}) + c.Assert(err, IsNil) + + data, err := b.Get("multi") + c.Assert(err, IsNil) + + c.Assert(len(data), Equals, len(data1)+len(data2)) + for i := range data1 { + if data[i] != data1[i] { + c.Fatalf("uploaded object at byte %d: want %d, got %d", data1[i], data[i]) + } + } + c.Assert(string(data[len(data1):]), Equals, string(data2)) +} + +type multiList []*s3.Multi + +func (l multiList) Len() int { return len(l) } +func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key } +func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +func (s *ClientTests) TestListMulti(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.Private) + c.Assert(err, IsNil) + + // Ensure an empty state before testing its behavior. + multis, _, err := b.ListMulti("", "") + for _, m := range multis { + err := m.Abort() + c.Assert(err, IsNil) + } + + keys := []string{ + "a/multi2", + "a/multi3", + "b/multi4", + "multi1", + } + for _, key := range keys { + m, err := b.InitMulti(key, "", s3.Private) + c.Assert(err, IsNil) + defer m.Abort() + } + + // Amazon's implementation of the multiple-request listing for + // multipart uploads in progress seems broken in multiple ways. + // (next tokens are not provided, etc). 
+ //s3.SetListMultiMax(2) + + multis, prefixes, err := b.ListMulti("", "") + c.Assert(err, IsNil) + for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); { + multis, prefixes, err = b.ListMulti("", "") + c.Assert(err, IsNil) + } + sort.Sort(multiList(multis)) + c.Assert(prefixes, IsNil) + var gotKeys []string + for _, m := range multis { + gotKeys = append(gotKeys, m.Key) + } + c.Assert(gotKeys, DeepEquals, keys) + for _, m := range multis { + c.Assert(m.Bucket, Equals, b) + c.Assert(m.UploadId, Matches, ".+") + } + + multis, prefixes, err = b.ListMulti("", "/") + for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; { + multis, prefixes, err = b.ListMulti("", "") + c.Assert(err, IsNil) + } + c.Assert(err, IsNil) + c.Assert(prefixes, DeepEquals, []string{"a/", "b/"}) + c.Assert(multis, HasLen, 1) + c.Assert(multis[0].Bucket, Equals, b) + c.Assert(multis[0].Key, Equals, "multi1") + c.Assert(multis[0].UploadId, Matches, ".+") + + for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; { + multis, prefixes, err = b.ListMulti("", "") + c.Assert(err, IsNil) + } + multis, prefixes, err = b.ListMulti("a/", "/") + c.Assert(err, IsNil) + c.Assert(prefixes, IsNil) + c.Assert(multis, HasLen, 2) + c.Assert(multis[0].Bucket, Equals, b) + c.Assert(multis[0].Key, Equals, "a/multi2") + c.Assert(multis[0].UploadId, Matches, ".+") + c.Assert(multis[1].Bucket, Equals, b) + c.Assert(multis[1].Key, Equals, "a/multi3") + c.Assert(multis[1].UploadId, Matches, ".+") +} + +func (s *ClientTests) TestMultiPutAllZeroLength(c *C) { + b := testBucket(s.s3) + err := b.PutBucket(s3.Private) + c.Assert(err, IsNil) + + multi, err := b.InitMulti("multi", "text/plain", s3.Private) + c.Assert(err, IsNil) + defer multi.Abort() + + // This tests an edge case. Amazon requires at least one + // part for multiprat uploads to work, even the part is empty. 
+ parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024) + c.Assert(err, IsNil) + c.Assert(parts, HasLen, 1) + c.Assert(parts[0].Size, Equals, int64(0)) + c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`) + + err = multi.Complete(parts) + c.Assert(err, IsNil) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go new file mode 100644 index 000000000..e98c50b29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3t_test.go @@ -0,0 +1,79 @@ +package s3_test + +import ( + "github.com/goamz/goamz/aws" + "github.com/goamz/goamz/s3" + "github.com/goamz/goamz/s3/s3test" + . "gopkg.in/check.v1" +) + +type LocalServer struct { + auth aws.Auth + region aws.Region + srv *s3test.Server + config *s3test.Config +} + +func (s *LocalServer) SetUp(c *C) { + srv, err := s3test.NewServer(s.config) + c.Assert(err, IsNil) + c.Assert(srv, NotNil) + + s.srv = srv + s.region = aws.Region{ + Name: "faux-region-1", + S3Endpoint: srv.URL(), + S3LocationConstraint: true, // s3test server requires a LocationConstraint + } +} + +// LocalServerSuite defines tests that will run +// against the local s3test server. It includes +// selected tests from ClientTests; +// when the s3test functionality is sufficient, it should +// include all of them, and ClientTests can be simply embedded. +type LocalServerSuite struct { + srv LocalServer + clientTests ClientTests +} + +var ( + // run tests twice, once in us-east-1 mode, once not. + _ = Suite(&LocalServerSuite{}) + _ = Suite(&LocalServerSuite{ + srv: LocalServer{ + config: &s3test.Config{ + Send409Conflict: true, + }, + }, + }) +) + +func (s *LocalServerSuite) SetUpSuite(c *C) { + s.srv.SetUp(c) + s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region) + + // TODO Sadly the fake server ignores auth completely right now. 
:-( + s.clientTests.authIsBroken = true + s.clientTests.Cleanup() +} + +func (s *LocalServerSuite) TearDownTest(c *C) { + s.clientTests.Cleanup() +} + +func (s *LocalServerSuite) TestBasicFunctionality(c *C) { + s.clientTests.TestBasicFunctionality(c) +} + +func (s *LocalServerSuite) TestGetNotFound(c *C) { + s.clientTests.TestGetNotFound(c) +} + +func (s *LocalServerSuite) TestBucketList(c *C) { + s.clientTests.TestBucketList(c) +} + +func (s *LocalServerSuite) TestDoublePutBucket(c *C) { + s.clientTests.TestDoublePutBucket(c) +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go new file mode 100644 index 000000000..10d36924f --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/s3test/server.go @@ -0,0 +1,629 @@ +package s3test + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "fmt" + "github.com/goamz/goamz/s3" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const debug = false + +type s3Error struct { + statusCode int + XMLName struct{} `xml:"Error"` + Code string + Message string + BucketName string + RequestId string + HostId string +} + +type action struct { + srv *Server + w http.ResponseWriter + req *http.Request + reqId string +} + +// Config controls the internal behaviour of the Server. A nil config is the default +// and behaves as if all configurations assume their default behaviour. Once passed +// to NewServer, the configuration must not be modified. +type Config struct { + // Send409Conflict controls how the Server will respond to calls to PUT on a + // previously existing bucket. The default is false, and corresponds to the + // us-east-1 s3 enpoint. Setting this value to true emulates the behaviour of + // all other regions. 
+ // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html + Send409Conflict bool +} + +func (c *Config) send409Conflict() bool { + if c != nil { + return c.Send409Conflict + } + return false +} + +// Server is a fake S3 server for testing purposes. +// All of the data for the server is kept in memory. +type Server struct { + url string + reqId int + listener net.Listener + mu sync.Mutex + buckets map[string]*bucket + config *Config +} + +type bucket struct { + name string + acl s3.ACL + ctime time.Time + objects map[string]*object +} + +type object struct { + name string + mtime time.Time + meta http.Header // metadata to return with requests. + checksum []byte // also held as Content-MD5 in meta. + data []byte +} + +// A resource encapsulates the subject of an HTTP request. +// The resource referred to may or may not exist +// when the request is made. +type resource interface { + put(a *action) interface{} + get(a *action) interface{} + post(a *action) interface{} + delete(a *action) interface{} +} + +func NewServer(config *Config) (*Server, error) { + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("cannot listen on localhost: %v", err) + } + srv := &Server{ + listener: l, + url: "http://" + l.Addr().String(), + buckets: make(map[string]*bucket), + config: config, + } + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + srv.serveHTTP(w, req) + })) + return srv, nil +} + +// Quit closes down the server. +func (srv *Server) Quit() { + srv.listener.Close() +} + +// URL returns a URL for the server. +func (srv *Server) URL() string { + return srv.url +} + +func fatalf(code int, codeStr string, errf string, a ...interface{}) { + panic(&s3Error{ + statusCode: code, + Code: codeStr, + Message: fmt.Sprintf(errf, a...), + }) +} + +// serveHTTP serves the S3 protocol. 
+func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { + // ignore error from ParseForm as it's usually spurious. + req.ParseForm() + + srv.mu.Lock() + defer srv.mu.Unlock() + + if debug { + log.Printf("s3test %q %q", req.Method, req.URL) + } + a := &action{ + srv: srv, + w: w, + req: req, + reqId: fmt.Sprintf("%09X", srv.reqId), + } + srv.reqId++ + + var r resource + defer func() { + switch err := recover().(type) { + case *s3Error: + switch r := r.(type) { + case objectResource: + err.BucketName = r.bucket.name + case bucketResource: + err.BucketName = r.name + } + err.RequestId = a.reqId + // TODO HostId + w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) + w.WriteHeader(err.statusCode) + xmlMarshal(w, err) + case nil: + default: + panic(err) + } + }() + + r = srv.resourceForURL(req.URL) + + var resp interface{} + switch req.Method { + case "PUT": + resp = r.put(a) + case "GET", "HEAD": + resp = r.get(a) + case "DELETE": + resp = r.delete(a) + case "POST": + resp = r.post(a) + default: + fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) + } + if resp != nil && req.Method != "HEAD" { + xmlMarshal(w, resp) + } +} + +// xmlMarshal is the same as xml.Marshal except that +// it panics on error. The marshalling should not fail, +// but we want to know if it does. +func xmlMarshal(w io.Writer, x interface{}) { + if err := xml.NewEncoder(w).Encode(x); err != nil { + panic(fmt.Errorf("error marshalling %#v: %v", x, err)) + } +} + +// In a fully implemented test server, each of these would have +// its own resource type. 
+var unimplementedBucketResourceNames = map[string]bool{ + "acl": true, + "lifecycle": true, + "policy": true, + "location": true, + "logging": true, + "notification": true, + "versions": true, + "requestPayment": true, + "versioning": true, + "website": true, + "uploads": true, +} + +var unimplementedObjectResourceNames = map[string]bool{ + "uploadId": true, + "acl": true, + "torrent": true, + "uploads": true, +} + +var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?") + +// resourceForURL returns a resource object for the given URL. +func (srv *Server) resourceForURL(u *url.URL) (r resource) { + m := pathRegexp.FindStringSubmatch(u.Path) + if m == nil { + fatalf(404, "InvalidURI", "Couldn't parse the specified URI") + } + bucketName := m[2] + objectName := m[4] + if bucketName == "" { + return nullResource{} // root + } + b := bucketResource{ + name: bucketName, + bucket: srv.buckets[bucketName], + } + q := u.Query() + if objectName == "" { + for name := range q { + if unimplementedBucketResourceNames[name] { + return nullResource{} + } + } + return b + + } + if b.bucket == nil { + fatalf(404, "NoSuchBucket", "The specified bucket does not exist") + } + objr := objectResource{ + name: objectName, + version: q.Get("versionId"), + bucket: b.bucket, + } + for name := range q { + if unimplementedObjectResourceNames[name] { + return nullResource{} + } + } + if obj := objr.bucket.objects[objr.name]; obj != nil { + objr.object = obj + } + return objr +} + +// nullResource has error stubs for all resource methods. 
+type nullResource struct{} + +func notAllowed() interface{} { + fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +func (nullResource) put(a *action) interface{} { return notAllowed() } +func (nullResource) get(a *action) interface{} { return notAllowed() } +func (nullResource) post(a *action) interface{} { return notAllowed() } +func (nullResource) delete(a *action) interface{} { return notAllowed() } + +const timeFormat = "2006-01-02T15:04:05.000Z07:00" + +type bucketResource struct { + name string + bucket *bucket // non-nil if the bucket already exists. +} + +// GET on a bucket lists the objects in the bucket. +// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html +func (r bucketResource) get(a *action) interface{} { + if r.bucket == nil { + fatalf(404, "NoSuchBucket", "The specified bucket does not exist") + } + delimiter := a.req.Form.Get("delimiter") + marker := a.req.Form.Get("marker") + maxKeys := -1 + if s := a.req.Form.Get("max-keys"); s != "" { + i, err := strconv.Atoi(s) + if err != nil || i < 0 { + fatalf(400, "invalid value for max-keys: %q", s) + } + maxKeys = i + } + prefix := a.req.Form.Get("prefix") + a.w.Header().Set("Content-Type", "application/xml") + + if a.req.Method == "HEAD" { + return nil + } + + var objs orderedObjects + + // first get all matching objects and arrange them in alphabetical order. 
+ for name, obj := range r.bucket.objects { + if strings.HasPrefix(name, prefix) { + objs = append(objs, obj) + } + } + sort.Sort(objs) + + if maxKeys <= 0 { + maxKeys = 1000 + } + resp := &s3.ListResp{ + Name: r.bucket.name, + Prefix: prefix, + Delimiter: delimiter, + Marker: marker, + MaxKeys: maxKeys, + } + + var prefixes []string + for _, obj := range objs { + if !strings.HasPrefix(obj.name, prefix) { + continue + } + name := obj.name + isPrefix := false + if delimiter != "" { + if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { + name = obj.name[:len(prefix)+i+len(delimiter)] + if prefixes != nil && prefixes[len(prefixes)-1] == name { + continue + } + isPrefix = true + } + } + if name <= marker { + continue + } + if len(resp.Contents)+len(prefixes) >= maxKeys { + resp.IsTruncated = true + break + } + if isPrefix { + prefixes = append(prefixes, name) + } else { + // Contents contains only keys not found in CommonPrefixes + resp.Contents = append(resp.Contents, obj.s3Key()) + } + } + resp.CommonPrefixes = prefixes + return resp +} + +// orderedObjects holds a slice of objects that can be sorted +// by name. +type orderedObjects []*object + +func (s orderedObjects) Len() int { + return len(s) +} +func (s orderedObjects) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s orderedObjects) Less(i, j int) bool { + return s[i].name < s[j].name +} + +func (obj *object) s3Key() s3.Key { + return s3.Key{ + Key: obj.name, + LastModified: obj.mtime.Format(timeFormat), + Size: int64(len(obj.data)), + ETag: fmt.Sprintf(`"%x"`, obj.checksum), + // TODO StorageClass + // TODO Owner + } +} + +// DELETE on a bucket deletes the bucket if it's not empty. 
+func (r bucketResource) delete(a *action) interface{} { + b := r.bucket + if b == nil { + fatalf(404, "NoSuchBucket", "The specified bucket does not exist") + } + if len(b.objects) > 0 { + fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty") + } + delete(a.srv.buckets, b.name) + return nil +} + +// PUT on a bucket creates the bucket. +// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html +func (r bucketResource) put(a *action) interface{} { + var created bool + if r.bucket == nil { + if !validBucketName(r.name) { + fatalf(400, "InvalidBucketName", "The specified bucket is not valid") + } + if loc := locationConstraint(a); loc == "" { + fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.") + } + // TODO validate acl + r.bucket = &bucket{ + name: r.name, + // TODO default acl + objects: make(map[string]*object), + } + a.srv.buckets[r.name] = r.bucket + created = true + } + if !created && a.srv.config.send409Conflict() { + fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.") + } + r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl")) + return nil +} + +func (bucketResource) post(a *action) interface{} { + fatalf(400, "Method", "bucket POST method not available") + return nil +} + +// validBucketName returns whether name is a valid bucket name. +// Here are the rules, from: +// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html +// +// Can contain lowercase letters, numbers, periods (.), underscores (_), +// and dashes (-). You can use uppercase letters for buckets only in the +// US Standard region. 
+// +// Must start with a number or letter +// +// Must be between 3 and 255 characters long +// +// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4) +// but the real S3 server does not seem to check that rule, so we will not +// check it either. +// +func validBucketName(name string) bool { + if len(name) < 3 || len(name) > 255 { + return false + } + r := name[0] + if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') { + return false + } + for _, r := range name { + switch { + case r >= '0' && r <= '9': + case r >= 'a' && r <= 'z': + case r == '_' || r == '-': + case r == '.': + default: + return false + } + } + return true +} + +var responseParams = map[string]bool{ + "content-type": true, + "content-language": true, + "expires": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, +} + +type objectResource struct { + name string + version string + bucket *bucket // always non-nil. + object *object // may be nil. +} + +// GET on an object gets the contents of the object. +// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html +func (objr objectResource) get(a *action) interface{} { + obj := objr.object + if obj == nil { + fatalf(404, "NoSuchKey", "The specified key does not exist.") + } + h := a.w.Header() + // add metadata + for name, d := range obj.meta { + h[name] = d + } + // override header values in response to request parameters. + for name, vals := range a.req.Form { + if strings.HasPrefix(name, "response-") { + name = name[len("response-"):] + if !responseParams[name] { + continue + } + h.Set(name, vals[0]) + } + } + if r := a.req.Header.Get("Range"); r != "" { + fatalf(400, "NotImplemented", "range unimplemented") + } + // TODO Last-Modified-Since + // TODO If-Modified-Since + // TODO If-Unmodified-Since + // TODO If-Match + // TODO If-None-Match + // TODO Connection: close ?? 
+ // TODO x-amz-request-id + h.Set("Content-Length", fmt.Sprint(len(obj.data))) + h.Set("ETag", hex.EncodeToString(obj.checksum)) + h.Set("Last-Modified", obj.mtime.Format(time.RFC1123)) + if a.req.Method == "HEAD" { + return nil + } + // TODO avoid holding the lock when writing data. + _, err := a.w.Write(obj.data) + if err != nil { + // we can't do much except just log the fact. + log.Printf("error writing data: %v", err) + } + return nil +} + +var metaHeaders = map[string]bool{ + "Content-MD5": true, + "x-amz-acl": true, + "Content-Type": true, + "Content-Encoding": true, + "Content-Disposition": true, +} + +// PUT on an object creates the object. +func (objr objectResource) put(a *action) interface{} { + // TODO Cache-Control header + // TODO Expires header + // TODO x-amz-server-side-encryption + // TODO x-amz-storage-class + + // TODO is this correct, or should we erase all previous metadata? + obj := objr.object + if obj == nil { + obj = &object{ + name: objr.name, + meta: make(http.Header), + } + } + + var expectHash []byte + if c := a.req.Header.Get("Content-MD5"); c != "" { + var err error + expectHash, err = base64.StdEncoding.DecodeString(c) + if err != nil || len(expectHash) != md5.Size { + fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid") + } + } + sum := md5.New() + // TODO avoid holding lock while reading data. 
+ data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) + if err != nil { + fatalf(400, "TODO", "read error") + } + gotHash := sum.Sum(nil) + if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { + fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received") + } + if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { + fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") + } + + // PUT request has been successful - save data and metadata + for key, values := range a.req.Header { + key = http.CanonicalHeaderKey(key) + if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { + obj.meta[key] = values + } + } + obj.data = data + obj.checksum = gotHash + obj.mtime = time.Now() + objr.bucket.objects[objr.name] = obj + return nil +} + +func (objr objectResource) delete(a *action) interface{} { + delete(objr.bucket.objects, objr.name) + return nil +} + +func (objr objectResource) post(a *action) interface{} { + fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +type CreateBucketConfiguration struct { + LocationConstraint string +} + +// locationConstraint parses the request body (if present). +// If there is no body, an empty string will be returned. 
+func locationConstraint(a *action) string { + var body bytes.Buffer + if _, err := io.Copy(&body, a.req.Body); err != nil { + fatalf(400, "InvalidRequest", err.Error()) + } + if body.Len() == 0 { + return "" + } + var loc CreateBucketConfiguration + if err := xml.NewDecoder(&body).Decode(&loc); err != nil { + fatalf(400, "InvalidRequest", err.Error()) + } + return loc.LocationConstraint +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go new file mode 100644 index 000000000..c8e57a2f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign.go @@ -0,0 +1,114 @@ +package s3 + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "github.com/goamz/goamz/aws" + "log" + "sort" + "strings" +) + +var b64 = base64.StdEncoding + +// ---------------------------------------------------------------------------- +// S3 signing (http://goo.gl/G1LrK) + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { + var md5, ctype, date, xamz string + var xamzDate bool + var sarray []string + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + vall := strings.Join(v, ",") + sarray = append(sarray, k+":"+vall) + if k 
== "x-amz-date" { + xamzDate = true + date = "" + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + xamz = strings.Join(sarray, "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + // Query string request authentication alternative. + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{auth.AccessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, k) + } else { + // "When signing you do not encode these values." + sarray = append(sarray, k+"="+vi) + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") + } + + payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath + hash := hmac.New(sha1.New, []byte(auth.SecretKey)) + hash.Write([]byte(payload)) + signature := make([]byte, b64.EncodedLen(hash.Size())) + b64.Encode(signature, hash.Sum(nil)) + + if expires { + params["Signature"] = []string{string(signature)} + } else { + headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} + } + if debug { + log.Printf("Signature payload: %q", payload) + log.Printf("Signature: %q", signature) + } +} diff --git a/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go new file mode 100644 index 000000000..112e1ca3e --- /dev/null +++ b/Godeps/_workspace/src/github.com/goamz/goamz/s3/sign_test.go @@ -0,0 +1,132 @@ +package s3_test + +import ( + "github.com/goamz/goamz/aws" + "github.com/goamz/goamz/s3" + . 
"gopkg.in/check.v1" +) + +// S3 ReST authentication docs: http://goo.gl/G1LrK + +var testAuth = aws.Auth{AccessKey: "0PN5J17HBGZHT7JJ3X82", SecretKey: "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"} + +func (s *S) TestSignExampleObjectGet(c *C) { + method := "GET" + path := "/johnsmith/photos/puppy.jpg" + headers := map[string][]string{ + "Host": {"johnsmith.s3.amazonaws.com"}, + "Date": {"Tue, 27 Mar 2007 19:36:42 +0000"}, + } + s3.Sign(testAuth, method, path, nil, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) TestSignExampleObjectPut(c *C) { + method := "PUT" + path := "/johnsmith/photos/puppy.jpg" + headers := map[string][]string{ + "Host": {"johnsmith.s3.amazonaws.com"}, + "Date": {"Tue, 27 Mar 2007 21:15:45 +0000"}, + "Content-Type": {"image/jpeg"}, + "Content-Length": {"94328"}, + } + s3.Sign(testAuth, method, path, nil, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) TestSignExampleList(c *C) { + method := "GET" + path := "/johnsmith/" + params := map[string][]string{ + "prefix": {"photos"}, + "max-keys": {"50"}, + "marker": {"puppy"}, + } + headers := map[string][]string{ + "Host": {"johnsmith.s3.amazonaws.com"}, + "Date": {"Tue, 27 Mar 2007 19:42:41 +0000"}, + "User-Agent": {"Mozilla/5.0"}, + } + s3.Sign(testAuth, method, path, params, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) TestSignExampleFetch(c *C) { + method := "GET" + path := "/johnsmith/" + params := map[string][]string{ + "acl": {""}, + } + headers := map[string][]string{ + "Host": {"johnsmith.s3.amazonaws.com"}, + "Date": {"Tue, 27 Mar 2007 19:44:46 +0000"}, + } + s3.Sign(testAuth, method, path, params, headers) + expected := "AWS 
0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) TestSignExampleDelete(c *C) { + method := "DELETE" + path := "/johnsmith/photos/puppy.jpg" + params := map[string][]string{} + headers := map[string][]string{ + "Host": {"s3.amazonaws.com"}, + "Date": {"Tue, 27 Mar 2007 21:20:27 +0000"}, + "User-Agent": {"dotnet"}, + "x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"}, + } + s3.Sign(testAuth, method, path, params, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) TestSignExampleUpload(c *C) { + method := "PUT" + path := "/static.johnsmith.net/db-backup.dat.gz" + params := map[string][]string{} + headers := map[string][]string{ + "Host": {"static.johnsmith.net:8080"}, + "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"}, + "User-Agent": {"curl/7.15.5"}, + "x-amz-acl": {"public-read"}, + "content-type": {"application/x-download"}, + "Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="}, + "X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"}, + "X-Amz-Meta-FileChecksum": {"0x02661779"}, + "X-Amz-Meta-ChecksumAlgorithm": {"crc32"}, + "Content-Disposition": {"attachment; filename=database.dat"}, + "Content-Encoding": {"gzip"}, + "Content-Length": {"5913339"}, + } + s3.Sign(testAuth, method, path, params, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) TestSignExampleListAllMyBuckets(c *C) { + method := "GET" + path := "/" + headers := map[string][]string{ + "Host": {"s3.amazonaws.com"}, + "Date": {"Wed, 28 Mar 2007 01:29:59 +0000"}, + } + s3.Sign(testAuth, method, path, nil, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} + +func (s *S) 
TestSignExampleUnicodeKeys(c *C) { + method := "GET" + path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re" + headers := map[string][]string{ + "Host": {"s3.amazonaws.com"}, + "Date": {"Wed, 28 Mar 2007 01:49:49 +0000"}, + } + s3.Sign(testAuth, method, path, nil, headers) + expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY=" + c.Assert(headers["Authorization"], DeepEquals, []string{expected}) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml new file mode 100644 index 000000000..6796581fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/gorilla/context/context.go new file mode 100644 index 000000000..81cb128b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. 
+func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. 
+// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go new file mode 100644 index 000000000..9814c501e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(value, nil) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- 
struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/gorilla/context/doc.go new file mode 100644 index 000000000..73c740031 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +sessions values to be saved at the end of a request. 
There are several +others common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. 
+ +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 000000000..d87d46576 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go new file mode 100644 index 000000000..c5f97b2b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "net/http" + "testing" +) + +func BenchmarkMux(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}", handler) + + request, _ := http.NewRequest("GET", "/v1/anything", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, request) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..9a5e381a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". 
Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. 
For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.domain.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.domain.com". 
Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.domain.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.domain.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). 
+ Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..af31d2395 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go @@ -0,0 +1,366 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "path" + + "github.com/gorilla/context" +) + +// NewRouter returns a new router instance. 
+func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. 
+func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) 
+} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVars registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. 
+type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// mapFromPairs converts variadic string parameters to a string map. 
+func mapFromPairs(pairs ...string) (map[string]string, error) { + length := len(pairs) + if length%2 != 0 { + return nil, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMap returns true if the given key/value pairs exist in a given map. +func matchMap(toCheck map[string]string, toMatch map[string][]string, + canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go new file mode 100644 index 000000000..6b2c1d22f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go @@ -0,0 +1,1012 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/context" +) + +type routeTest struct { + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect +} + +func TestHost(t *testing.T) { + // newRequestHost a new request with a method, url, and host header + newRequestHost := func(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req + } + + tests := []routeTest{ + { + title: "Host route match", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with port, match", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, + { + title: "Host route with port, wrong port in request URL", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route, match with host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + 
request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: 
"aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/a"), + vars: map[string]string{"category": "a"}, + host: "", + path: "/a", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/b/c"), + vars: map[string]string{"category": "b/c"}, + host: "", + path: "/b/c", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/b/c/product_name/1"), + vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, + host: "", + path: "/b/c/product_name/1", + shouldMatch: true, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + tests := []routeTest{ + { + title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: 
newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + title: "PathPrefix 
route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) { + tests := 
[]routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Host and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", 
"v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHeaders(t *testing.T) { + // newRequestHeaders creates a new request with a method, url, and headers + newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + for k, v := range headers { + req.Header.Add(k, v) + } + return req + } + + tests := []routeTest{ + { + title: "Headers route, match", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Headers route, bad header values", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } + +} + +func TestMethods(t *testing.T) { + tests := []routeTest{ + { + title: "Methods route, match GET", + route: new(Route).Methods("GET", "POST"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, match POST", + route: new(Route).Methods("GET", "POST"), + request: newRequest("POST", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, bad method", + route: new(Route).Methods("GET", "POST"), + request: newRequest("PUT", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestQueries(t *testing.T) { + 
tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + 
route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSchemes(t *testing.T) { + tests := []routeTest{ + // Schemes + { + title: "Schemes route, match https", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "https://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, match ftp", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "ftp://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, bad scheme", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestMatcherFunc(t *testing.T) { + m := func(r *http.Request, m *RouteMatch) bool { + if r.URL.Host == "aaa.bbb.ccc" { + return true + } + return false + } + + tests := []routeTest{ + { + title: "MatchFunc route, match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.bbb.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "MatchFunc route, non-match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.222.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestBuildVarsFunc(t *testing.T) { + tests := []routeTest{ + { + title: "BuildVarsFunc set on route", + route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "3" + vars["v2"] = "a" + return vars + }), + request: 
newRequest("GET", "http://localhost/111/2"), + path: "/111/3a", + shouldMatch: true, + }, + { + title: "BuildVarsFunc set on route and parent route", + route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "2" + return vars + }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v2"] = "b" + return vars + }), + request: newRequest("GET", "http://localhost/1/a"), + path: "/2/b", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSubRouter(t *testing.T) { + subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() + subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() + + tests := []routeTest{ + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: true, + }, + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: false, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: true, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestNamedRoutes(t *testing.T) { + r1 := NewRouter() + r1.NewRoute().Name("a") + r1.NewRoute().Name("b") + r1.NewRoute().Name("c") + + r2 := r1.NewRoute().Subrouter() + r2.NewRoute().Name("d") + r2.NewRoute().Name("e") + 
r2.NewRoute().Name("f") + + r3 := r2.NewRoute().Subrouter() + r3.NewRoute().Name("g") + r3.NewRoute().Name("h") + r3.NewRoute().Name("i") + + if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { + t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + 
shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) + if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) 
+ if url != u.Host+u.Path { + t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) + return + } + } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if 
"http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") + } +} + +// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW +func TestSubrouterHeader(t *testing.T) { + expected := "func1 response" + func1 := func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, expected) + } + func2 := func(http.ResponseWriter, *http.Request) {} + + r := NewRouter() + s := r.Headers("SomeSpecialHeader", "").Subrouter() + s.HandleFunc("/", func1).Name("func1") + r.HandleFunc("/", func2).Name("func2") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + req.Header.Add("SomeSpecialHeader", "foo") + match := new(RouteMatch) + matched := r.Match(req, match) + if !matched { + t.Errorf("Should match request") + } + if match.Route.GetName() != "func1" { + t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) + } + resp := NewRecorder() + match.Handler.ServeHTTP(resp, req) + if resp.Body.String() != expected { + t.Errorf("Expecting %q", expected) + } +} + +// mapToPairs converts a string map to a slice of string pairs +func mapToPairs(m map[string]string) []string { + var i int + p := make([]string, len(m)*2) + for k, v := range m { + p[i] = k + p[i+1] = v + i += 2 + } + return p +} + +// stringMapEqual checks the equality of two string maps +func stringMapEqual(m1, m2 map[string]string) bool { + nil1 := m1 == nil + nil2 := m2 == nil + if nil1 != nil2 || len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if v != m2[k] { + return false + } + } + return true +} + +// newRequest is a helper function to create a new request with a method and url +func newRequest(method, url string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return req +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go new file mode 100644 index 000000000..1f7c190c0 --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go @@ -0,0 +1,714 @@ +// Old tests ported to Go1. This is a mess. Want to drop it one day. + +// Copyright 2011 Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "net/http" + "testing" +) + +// ---------------------------------------------------------------------------- +// ResponseRecorder +// ---------------------------------------------------------------------------- +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. 
+func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +func TestRouteMatchers(t *testing.T) { + var scheme, host, path, query, method string + var headers map[string]string + var resultVars map[bool]map[string]string + + router := NewRouter() + router.NewRoute().Host("{var1}.google.com"). + Path("/{var2:[a-z]+}/{var3:[0-9]+}"). + Queries("foo", "bar"). + Methods("GET"). + Schemes("https"). + Headers("x-requested-with", "XMLHttpRequest") + router.NewRoute().Host("www.{var4}.com"). + PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). + Queries("baz", "ding"). + Methods("POST"). + Schemes("http"). + Headers("Content-Type", "application/json") + + reset := func() { + // Everything match. + scheme = "https" + host = "www.google.com" + path = "/product/42" + query = "?foo=bar" + method = "GET" + headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} + resultVars = map[bool]map[string]string{ + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, + } + } + + reset2 := func() { + // Everything match. + scheme = "http" + host = "www.google.com" + path = "/foo/product/42/path/that/is/ignored" + query = "?baz=ding" + method = "POST" + headers = map[string]string{"Content-Type": "application/json"} + resultVars = map[bool]map[string]string{ + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, + } + } + + match := func(shouldMatch bool) { + url := scheme + "://" + host + path + query + request, _ := http.NewRequest(method, url, nil) + for key, value := range headers { + request.Header.Add(key, value) + } + + var routeMatch RouteMatch + matched := router.Match(request, &routeMatch) + if matched != shouldMatch { + // Need better messages. 
:) + if matched { + t.Errorf("Should match.") + } else { + t.Errorf("Should not match.") + } + } + + if matched { + currentRoute := routeMatch.Route + if currentRoute == nil { + t.Errorf("Expected a current route.") + } + vars := routeMatch.Vars + expectedVars := resultVars[shouldMatch] + if len(vars) != len(expectedVars) { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + for name, value := range vars { + if expectedVars[name] != value { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + } + } + } + + // 1st route -------------------------------------------------------------- + + // Everything match. + reset() + match(true) + + // Scheme doesn't match. + reset() + scheme = "http" + match(false) + + // Host doesn't match. + reset() + host = "www.mygoogle.com" + match(false) + + // Path doesn't match. + reset() + path = "/product/notdigits" + match(false) + + // Query doesn't match. + reset() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset() + method = "POST" + match(false) + + // Header doesn't match. + reset() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset() + match(true) + + // 2nd route -------------------------------------------------------------- + + // Everything match. + reset2() + match(true) + + // Scheme doesn't match. + reset2() + scheme = "https" + match(false) + + // Host doesn't match. + reset2() + host = "sub.google.com" + match(false) + + // Path doesn't match. + reset2() + path = "/bar/product/42" + match(false) + + // Query doesn't match. + reset2() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset2() + method = "GET" + match(false) + + // Header doesn't match. + reset2() + headers = map[string]string{} + match(false) + + // Everything match, again. 
+ reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: 
NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = []urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: []string{}, + url: "http://foo.domain.com/articles", + }, + { + route: new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: 
"http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := 
v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := v.route.URL(v.vars...) 
+ url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. 
+func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..aa3067986 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go @@ -0,0 +1,272 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. 
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It extracts named variables of the form {name} or {name:pattern},
// assembles a single regexp to be matched, builds a "reverse" template
// used for URL building, and compiles one validator regexp per variable.
//
// Name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
	// Check if it is well-formed: idxs holds [start, end) index pairs for
	// each top-level {...} group in the template.
	idxs, errBraces := braceIndices(tpl)
	if errBraces != nil {
		return nil, errBraces
	}
	// Backup the original template before it is mutated below.
	template := tpl
	// The default pattern for a bare {name} depends on what is matched:
	// a path segment, a query value, or a single host label.
	defaultPattern := "[^/]+"
	if matchQuery {
		defaultPattern = "[^?&]+"
		matchPrefix = true
	} else if matchHost {
		defaultPattern = "[^.]+"
		matchPrefix = false
	}
	// strictSlash only makes sense for a plain path match; disable it for
	// prefix, host and query matchers.
	if matchPrefix || matchHost || matchQuery {
		strictSlash = false
	}
	// Under strictSlash, strip a trailing slash here and re-append it to the
	// reverse template at the end, while the regexp accepts both forms.
	endSlash := false
	if strictSlash && strings.HasSuffix(tpl, "/") {
		tpl = tpl[:len(tpl)-1]
		endSlash = true
	}
	varsN := make([]string, len(idxs)/2)         // variable names, in template order
	varsR := make([]*regexp.Regexp, len(idxs)/2) // per-variable validator regexps
	pattern := bytes.NewBufferString("")
	if !matchQuery {
		// Query strings match anywhere; everything else is anchored at the start.
		pattern.WriteByte('^')
	}
	reverse := bytes.NewBufferString("")
	var end int
	var err error
	for i := 0; i < len(idxs); i += 2 {
		// raw is the literal text preceding this {...} group.
		raw := tpl[end:idxs[i]]
		end = idxs[i+1]
		// Split "name:pattern" inside the braces; the pattern part is optional.
		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
		name := parts[0]
		patt := defaultPattern
		if len(parts) == 2 {
			patt = parts[1]
		}
		// Name or pattern can't be empty.
		if name == "" || patt == "" {
			return nil, fmt.Errorf("mux: missing name or pattern in %q",
				tpl[idxs[i]:end])
		}
		// Build the regexp pattern: quoted literal plus a capturing group.
		fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt)
		// Build the reverse template: literal plus a %s placeholder.
		fmt.Fprintf(reverse, "%s%%s", raw)
		// Append variable name and compiled validator pattern.
		varsN[i/2] = name
		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
		if err != nil {
			return nil, err
		}
	}
	// Add the remaining literal text after the last variable.
	raw := tpl[end:]
	pattern.WriteString(regexp.QuoteMeta(raw))
	if strictSlash {
		// Accept an optional trailing slash.
		pattern.WriteString("[/]?")
	}
	if !matchPrefix {
		pattern.WriteByte('$')
	}
	reverse.WriteString(raw)
	if endSlash {
		reverse.WriteByte('/')
	}
	// Compile full regexp.
	reg, errCompile := regexp.Compile(pattern.String())
	if errCompile != nil {
		return nil, errCompile
	}
	// Done!
	return &routeRegexp{
		template:    template,
		matchHost:   matchHost,
		matchQuery:  matchQuery,
		strictSlash: strictSlash,
		regexp:      reg,
		reverse:     reverse.String(),
		varsN:       varsN,
		varsR:       varsR,
	}, nil
}

// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables.
type routeRegexp struct {
	// The unmodified template.
	template string
	// True for host match, false for path or query string match.
	matchHost bool
	// True for query string match, false for path and host match.
	matchQuery bool
	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
	strictSlash bool
	// Expanded regexp.
	regexp *regexp.Regexp
	// Reverse template.
	reverse string
	// Variable names.
	varsN []string
	// Variable regexps (validators).
	varsR []*regexp.Regexp
}

// Match matches the regexp against the URL host, path or raw query,
// depending on which kind of matcher this routeRegexp was built as.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
	if !r.matchHost {
		if r.matchQuery {
			return r.regexp.MatchString(req.URL.RawQuery)
		} else {
			return r.regexp.MatchString(req.URL.Path)
		}
	}
	return r.regexp.MatchString(getHost(req))
}

// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. 
+ if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + rawQuery := req.URL.RawQuery + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(rawQuery) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..d4f014688 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/route.go @@ -0,0 +1,571 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. 
+ handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. 
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMap(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairs(pairs...) 
+ return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.domain.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". 
+func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. 
+// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// It the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. 
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.domain.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. 
+func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairs(pairs...) 
+ if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. 
+ r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore b/Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml new file mode 100644 index 000000000..8687342e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS b/Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..b003eca0c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Joachim Bauch + diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE b/Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go new file mode 100644 index 000000000..f66fc36bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go @@ -0,0 +1,19 @@ +// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "testing" +) + +func BenchmarkMaskBytes(b *testing.B) { + var key [4]byte + data := make([]byte, 1024) + pos := 0 + for i := 0; i < b.N; i++ { + pos = maskBytes(key, pos, data) + } + b.SetBytes(int64(len(data))) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..5bc27e193 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/client.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + acceptKey := computeAcceptKey(challengeKey) + + c = newConn(netConn, false, readBufSize, writeBufSize) + p := c.writeBuf[:0] + p = append(p, "GET "...) + p = append(p, u.RequestURI()...) 
+ p = append(p, " HTTP/1.1\r\nHost: "...) + p = append(p, u.Host...) + // "Upgrade" is capitalized for servers that do not use case insensitive + // comparisons on header tokens. + p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) + p = append(p, challengeKey...) + p = append(p, "\r\n"...) + for k, vs := range requestHeader { + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + p = append(p, v...) + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + if _, err := netConn.Write(p); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != acceptKey { + return nil, resp, ErrBadHandshake + } + c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + return c, resp, nil +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // Input and output buffer sizes. If the buffer size is zero, then a + // default value of 4096 is used. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +// parseURL parses the URL. 
The url.Parse function is not used here because +// url.Parse mangles the path. +func parseURL(s string) (*url.URL, error) { + // From the RFC: + // + // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] + // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] + // + // We don't use the net/url parser here because the dialer interface does + // not provide a way for applications to work around percent deocding in + // the net/url parser. + + var u url.URL + switch { + case strings.HasPrefix(s, "ws://"): + u.Scheme = "ws" + s = s[len("ws://"):] + case strings.HasPrefix(s, "wss://"): + u.Scheme = "wss" + s = s[len("wss://"):] + default: + return nil, errMalformedURL + } + + u.Host = s + u.Opaque = "/" + if i := strings.Index(s, "/"); i >= 0 { + u.Host = s[:i] + u.Opaque = s[i:] + } + + return &u, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + if u.Scheme == "wss" { + hostPort += ":443" + } else { + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default zero values. +var DefaultDialer *Dialer + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. 
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + u, err := parseURL(urlStr) + if err != nil { + return nil, nil, err + } + + hostPort, hostNoPort := hostPortNoPort(u) + + if d == nil { + d = &Dialer{} + } + + var deadline time.Time + if d.HandshakeTimeout != 0 { + deadline = time.Now().Add(d.HandshakeTimeout) + } + + netDial := d.NetDial + if netDial == nil { + netDialer := &net.Dialer{Deadline: deadline} + netDial = netDialer.Dial + } + + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if err := netConn.SetDeadline(deadline); err != nil { + return nil, nil, err + } + + if u.Scheme == "wss" { + cfg := d.TLSClientConfig + if cfg == nil { + cfg = &tls.Config{ServerName: hostNoPort} + } else if cfg.ServerName == "" { + shallowCopy := *cfg + cfg = &shallowCopy + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + if len(d.Subprotocols) > 0 { + h := http.Header{} + for k, v := range requestHeader { + h[k] = v + } + h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", ")) + requestHeader = h + } + + if len(requestHeader["Host"]) > 0 { + // This can be used to supply a Host: header which is different from + // the dial address. 
+ u.Host = requestHeader.Get("Host") + + // Drop "Host" header + h := http.Header{} + for k, v := range requestHeader { + if k == "Host" { + continue + } + h[k] = v + } + requestHeader = h + } + + conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize) + + if err != nil { + if err == ErrBadHandshake { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + } + return nil, resp, err + } + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go new file mode 100644 index 000000000..749ef2050 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go @@ -0,0 +1,323 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/tls" + "crypto/x509" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" + "time" +) + +var cstUpgrader = Upgrader{ + Subprotocols: []string{"p0", "p1"}, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) { + http.Error(w, reason.Error(), status) + }, +} + +var cstDialer = Dialer{ + Subprotocols: []string{"p1", "p2"}, + ReadBufferSize: 1024, + WriteBufferSize: 1024, +} + +type cstHandler struct{ *testing.T } + +type cstServer struct { + *httptest.Server + URL string +} + +func newServer(t *testing.T) *cstServer { + var s cstServer + s.Server = httptest.NewServer(cstHandler{t}) + s.URL = makeWsProto(s.Server.URL) + return &s +} + +func newTLSServer(t *testing.T) *cstServer { + var s cstServer + s.Server = httptest.NewTLSServer(cstHandler{t}) + s.URL = makeWsProto(s.Server.URL) + return &s +} + +func (t cstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Logf("method %s not allowed", r.Method) + http.Error(w, "method not allowed", 405) + return + } + subprotos := Subprotocols(r) + if !reflect.DeepEqual(subprotos, cstDialer.Subprotocols) { + t.Logf("subprotols=%v, want %v", subprotos, cstDialer.Subprotocols) + http.Error(w, "bad protocol", 400) + return + } + ws, err := cstUpgrader.Upgrade(w, r, http.Header{"Set-Cookie": {"sessionID=1234"}}) + if err != nil { + t.Logf("Upgrade: %v", err) + return + } + defer ws.Close() + + if ws.Subprotocol() != "p1" { + t.Logf("Subprotocol() = %s, want p1", ws.Subprotocol()) + ws.Close() + return + } + op, rd, err := ws.NextReader() + if err != nil { + t.Logf("NextReader: %v", err) + return + } + wr, err := ws.NextWriter(op) + if err != nil { + t.Logf("NextWriter: %v", err) + return + } + if _, err = io.Copy(wr, rd); err != nil { + t.Logf("NextWriter: %v", err) + return + } + if err := wr.Close(); err != nil { + 
t.Logf("Close: %v", err) + return + } +} + +func makeWsProto(s string) string { + return "ws" + strings.TrimPrefix(s, "http") +} + +func sendRecv(t *testing.T, ws *Conn) { + const message = "Hello World!" + if err := ws.SetWriteDeadline(time.Now().Add(time.Second)); err != nil { + t.Fatalf("SetWriteDeadline: %v", err) + } + if err := ws.WriteMessage(TextMessage, []byte(message)); err != nil { + t.Fatalf("WriteMessage: %v", err) + } + if err := ws.SetReadDeadline(time.Now().Add(time.Second)); err != nil { + t.Fatalf("SetReadDeadline: %v", err) + } + _, p, err := ws.ReadMessage() + if err != nil { + t.Fatalf("ReadMessage: %v", err) + } + if string(p) != message { + t.Fatalf("message=%s, want %s", p, message) + } +} + +func TestDial(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, _, err := cstDialer.Dial(s.URL, nil) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + sendRecv(t, ws) +} + +func TestDialTLS(t *testing.T) { + s := newTLSServer(t) + defer s.Close() + + certs := x509.NewCertPool() + for _, c := range s.TLS.Certificates { + roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1]) + if err != nil { + t.Fatalf("error parsing server's root cert: %v", err) + } + for _, root := range roots { + certs.AddCert(root) + } + } + + u, _ := url.Parse(s.URL) + d := cstDialer + d.NetDial = func(network, addr string) (net.Conn, error) { return net.Dial(network, u.Host) } + d.TLSClientConfig = &tls.Config{RootCAs: certs} + ws, _, err := d.Dial("wss://example.com/", nil) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + sendRecv(t, ws) +} + +func xTestDialTLSBadCert(t *testing.T) { + // This test is deactivated because of noisy logging from the net/http package. 
+ s := newTLSServer(t) + defer s.Close() + + ws, _, err := cstDialer.Dial(s.URL, nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } +} + +func xTestDialTLSNoVerify(t *testing.T) { + s := newTLSServer(t) + defer s.Close() + + d := cstDialer + d.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + ws, _, err := d.Dial(s.URL, nil) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + sendRecv(t, ws) +} + +func TestDialTimeout(t *testing.T) { + s := newServer(t) + defer s.Close() + + d := cstDialer + d.HandshakeTimeout = -1 + ws, _, err := d.Dial(s.URL, nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } +} + +func TestDialBadScheme(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, _, err := cstDialer.Dial(s.Server.URL, nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } +} + +func TestDialBadOrigin(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {"bad"}}) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } + if resp == nil { + t.Fatalf("resp=nil, err=%v", err) + } + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("status=%d, want %d", resp.StatusCode, http.StatusForbidden) + } +} + +func TestHandshake(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {s.URL}}) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + + var sessionID string + for _, c := range resp.Cookies() { + if c.Name == "sessionID" { + sessionID = c.Value + } + } + if sessionID != "1234" { + t.Error("Set-Cookie not received from the server.") + } + + if ws.Subprotocol() != "p1" { + t.Errorf("ws.Subprotocol() = %s, want p1", ws.Subprotocol()) + } + sendRecv(t, ws) +} + +func TestRespOnBadHandshake(t *testing.T) { + const expectedStatus = http.StatusGone + const expectedBody = "This is the response body." 
+ + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(expectedStatus) + io.WriteString(w, expectedBody) + })) + defer s.Close() + + ws, resp, err := cstDialer.Dial(makeWsProto(s.URL), nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } + + if resp == nil { + t.Fatalf("resp=nil, err=%v", err) + } + + if resp.StatusCode != expectedStatus { + t.Errorf("resp.StatusCode=%d, want %d", resp.StatusCode, expectedStatus) + } + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("ReadFull(resp.Body) returned error %v", err) + } + + if string(p) != expectedBody { + t.Errorf("resp.Body=%s, want %s", p, expectedBody) + } +} + +// If the Host header is specified in `Dial()`, the server must receive it as +// the `Host:` header. +func TestHostHeader(t *testing.T) { + s := newServer(t) + defer s.Close() + + specifiedHost := make(chan string, 1) + origHandler := s.Server.Config.Handler + + // Capture the request Host header. + s.Server.Config.Handler = http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + specifiedHost <- r.Host + origHandler.ServeHTTP(w, r) + }) + + ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Host": {"testhost"}}) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + + if resp.StatusCode != http.StatusSwitchingProtocols { + t.Fatalf("resp.StatusCode = %v, want http.StatusSwitchingProtocols", resp.StatusCode) + } + + if gotHost := <-specifiedHost; gotHost != "testhost" { + t.Fatalf("gotHost = %q, want \"testhost\"", gotHost) + } + + sendRecv(t, ws) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go new file mode 100644 index 000000000..d2f2ebd79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "net/url" + "reflect" + "testing" +) + +var parseURLTests = []struct { + s string + u *url.URL +}{ + {"ws://example.com/", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}}, + {"ws://example.com", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}}, + {"ws://example.com:7777/", &url.URL{Scheme: "ws", Host: "example.com:7777", Opaque: "/"}}, + {"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}}, + {"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}}, + {"ss://example.com/a/b", nil}, +} + +func TestParseURL(t *testing.T) { + for _, tt := range parseURLTests { + u, err := parseURL(tt.s) + if tt.u != nil && err != nil { + t.Errorf("parseURL(%q) returned error %v", tt.s, err) + continue + } + if tt.u == nil && err == nil { + t.Errorf("parseURL(%q) did not return error", tt.s) + continue + } + if !reflect.DeepEqual(u, tt.u) { + t.Errorf("parseURL(%q) returned %v, want %v", tt.s, u, tt.u) + continue + } + } +} + +var hostPortNoPortTests = []struct { + u *url.URL + hostPort, hostNoPort string +}{ + {&url.URL{Scheme: "ws", Host: "example.com"}, "example.com:80", "example.com"}, + {&url.URL{Scheme: "wss", Host: "example.com"}, "example.com:443", "example.com"}, + {&url.URL{Scheme: "ws", Host: "example.com:7777"}, "example.com:7777", "example.com"}, + {&url.URL{Scheme: "wss", Host: "example.com:7777"}, "example.com:7777", "example.com"}, +} + +func TestHostPortNoPort(t *testing.T) { + for _, tt := range hostPortNoPortTests { + hostPort, hostNoPort := hostPortNoPort(tt.u) + if hostPort != tt.hostPort { + t.Errorf("hostPortNoPort(%v) returned hostPort %q, want %q", tt.u, hostPort, tt.hostPort) + } + if hostNoPort != tt.hostNoPort { + t.Errorf("hostPortNoPort(%v) returned hostNoPort %q, want %q", tt.u, hostNoPort, tt.hostNoPort) + } + } +} diff --git 
a/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go b/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..e719f1ce6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go @@ -0,0 +1,825 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "time" +) + +const ( + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + finalBit = 1 << 7 + maskBit = 1 << 7 + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a ping control message. 
The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// closeError represents close frame. +type closeError struct { + code int + text string +} + +func (e *closeError) Error() string { + return "websocket: close " + strconv.Itoa(e.code) + " " + e.text +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true} + errUnexpectedEOF = &closeError{code: CloseAbnormalClosure, text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +// Conn 
// newConn allocates a Conn over the given network connection. isServer
// selects the masking direction required by RFC 6455 (clients mask
// outgoing frames, servers do not — see WriteControl/flushFrame). A zero
// buffer size selects the package default (4096 bytes).
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
	// The write mutex is a one-slot channel so that WriteControl can wait
	// for it with a timeout via select; a token in the channel means
	// "unlocked".
	mu := make(chan bool, 1)
	mu <- true

	if readBufferSize == 0 {
		readBufferSize = defaultReadBufferSize
	}
	if writeBufferSize == 0 {
		writeBufferSize = defaultWriteBufferSize
	}

	c := &Conn{
		isServer: isServer,
		br:       bufio.NewReaderSize(conn, readBufferSize),
		conn:     conn,
		mu:       mu,
		// readFinal starts true so the first data frame is accepted as the
		// start of a new message rather than an unexpected continuation.
		readFinal: true,
		// Room is reserved at the front of writeBuf for the frame header,
		// which flushFrame fills in just before writing.
		writeBuf:       make([]byte, writeBufferSize+maxFrameHeaderSize),
		writeFrameType: noFrame,
		writePos:       maxFrameHeaderSize,
	}
	// Install the default ping handler (reply with a pong) and the default
	// pong handler (no-op).
	c.SetPingHandler(nil)
	c.SetPongHandler(nil)
	return c
}
// write acquires the write lock and writes the given buffers to the
// underlying connection under the supplied deadline. It refuses to write
// after a close frame has been sent, and records when one is being sent.
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
	// c.mu is a one-slot channel used as a mutex guarding conn writes and
	// closeSent: receiving takes the lock, sending the token back releases.
	<-c.mu
	defer func() { c.mu <- true }()

	if c.closeSent {
		return ErrCloseSent
	} else if frameType == CloseMessage {
		c.closeSent = true
	}

	c.conn.SetWriteDeadline(deadline)
	for _, buf := range bufs {
		if len(buf) > 0 {
			n, err := c.conn.Write(buf)
			if n != len(buf) {
				// Close on partial write: the stream is now desynchronized
				// mid-frame and cannot safely carry further frames.
				c.conn.Close()
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// NextWriter returns a writer for the next message to send. The writer's
// Close method flushes the complete message to the network.
//
// There can be at most one open writer on a connection. NextWriter closes the
// previous writer if the application has not already done so.
//
// The NextWriter method and the writers returned from the method cannot be
// accessed by more than one goroutine at a time.
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
	// A previous write failure poisons the connection for all later writes.
	if c.writeErr != nil {
		return nil, c.writeErr
	}

	// Implicitly finish the previous message if it is still open.
	if c.writeFrameType != noFrame {
		if err := c.flushFrame(true, nil); err != nil {
			return nil, err
		}
	}

	if !isControl(messageType) && !isData(messageType) {
		return nil, errBadWriteOpCode
	}

	c.writeFrameType = messageType
	// The writer carries the current sequence number; flushing the final
	// frame increments writeSeq, invalidating any outstanding writers.
	return messageWriter{c, c.writeSeq}, nil
}
+ framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + if len(extra) > 0 { + c.writeErr = errors.New("websocket: internal error, extra used in client mode") + return c.writeErr + } + } + + // Write the buffers to the connection. + c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + + // Setup for next frame. + c.writePos = maxFrameHeaderSize + c.writeFrameType = continuationFrame + if final { + c.writeSeq++ + c.writeFrameType = noFrame + } + return c.writeErr +} + +type messageWriter struct { + c *Conn + seq int +} + +func (w messageWriter) err() error { + c := w.c + if c.writeSeq != w.seq { + return errWriteClosed + } + if c.writeErr != nil { + return c.writeErr + } + return nil +} + +func (w messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.c.writePos + if n <= 0 { + if err := w.c.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.c.writePos + } + if n > max { + n = max + } + return n, nil +} + +func (w messageWriter) write(final bool, p []byte) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.c.flushFrame(final, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) Write(p []byte) (int, error) { + return w.write(false, p) +} + +func (w messageWriter) WriteString(p string) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if err := w.err(); err != nil { + return 0, err + } + for { + if w.c.writePos == len(w.c.writeBuf) { + err = w.c.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.c.writePos:]) + w.c.writePos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w messageWriter) Close() error { + if err := w.err(); err != nil { + return err + } + return w.c.flushFrame(true, nil) +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + wr, err := c.NextWriter(messageType) + if err != nil { + return err + } + w := wr.(messageWriter) + if _, err := w.write(true, data); err != nil { + return err + } + if c.writeSeq == w.seq { + if err := c.flushFrame(true, nil); err != nil { + return err + } + } + return nil +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. 
+func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +// readFull is like io.ReadFull except that io.EOF is never returned. +func (c *Conn) readFull(p []byte) (err error) { + var n int + for n < len(p) && err == nil { + var nn int + nn, err = c.br.Read(p[n:]) + n += nn + } + if n == len(p) { + err = nil + } else if err == io.EOF { + err = errUnexpectedEOF + } + return +} + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + + var b [8]byte + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + + final := b[0]&finalBit != 0 + frameType := int(b[0] & 0xf) + reserved := int((b[0] >> 4) & 0x7) + mask := b[1]&maskBit != 0 + c.readRemaining = int64(b[1] & 0x7f) + + if reserved != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. 
+ + switch c.readRemaining { + case 126: + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + case 127: + if err := c.readFull(b[:8]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + if err := c.readFull(c.readMaskKey[:]); err != nil { + return noFrame, err + } + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload = make([]byte, c.readRemaining) + c.readRemaining = 0 + if err := c.readFull(payload); err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + closeText = string(payload[2:]) + } + switch closeCode { + case CloseNormalClosure, CloseGoingAway: + return noFrame, io.EOF + default: + return noFrame, &closeError{code: closeCode, text: closeText} + } + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// The NextReader method and the readers returned from the method cannot be +// accessed by more than one goroutine at a time. 
// NextReader returns the next data message received from the peer. The
// returned messageType is either TextMessage or BinaryMessage. Control
// frames encountered along the way are consumed by advanceFrame and
// dispatched to the ping/pong/close handlers.
func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {

	// Bumping readSeq invalidates the reader handed out by the previous
	// call; readLength restarts the per-message read-limit accounting.
	c.readSeq++
	c.readLength = 0

	// A previous read failure poisons the connection for all later reads.
	for c.readErr == nil {
		frameType, err := c.advanceFrame()
		if err != nil {
			c.readErr = hideTempErr(err)
			break
		}
		if frameType == TextMessage || frameType == BinaryMessage {
			return frameType, messageReader{c, c.readSeq}, nil
		}
	}
	return noFrame, nil, c.readErr
}
// FormatCloseMessage formats closeCode and text as a WebSocket close
// message payload: a two-byte big-endian status code followed by the
// UTF-8 text.
func FormatCloseMessage(closeCode int, text string) []byte {
	msg := make([]byte, 2, 2+len(text))
	binary.BigEndian.PutUint16(msg, uint16(closeCode))
	return append(msg, text...)
}
// fakeNetConn is a net.Conn stub for tests: reads come from the embedded
// Reader, writes go to the embedded Writer, and every other net.Conn
// method is a no-op returning zero values.
type fakeNetConn struct {
	io.Reader
	io.Writer
}

func (c fakeNetConn) Close() error                       { return nil }
func (c fakeNetConn) LocalAddr() net.Addr                { return nil }
func (c fakeNetConn) RemoteAddr() net.Addr               { return nil }
func (c fakeNetConn) SetDeadline(t time.Time) error      { return nil }
func (c fakeNetConn) SetReadDeadline(t time.Time) error  { return nil }
func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil }
opCode, r, err := rc.NextReader() + if err != nil || opCode != TextMessage { + t.Errorf("%s: NextReader() returned %d, r, %v", name, opCode, err) + continue + } + rbuf, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%s: ReadFull() returned rbuf, %v", name, err) + continue + } + + if len(rbuf) != n { + t.Errorf("%s: len(rbuf) is %d, want %d", name, len(rbuf), n) + continue + } + + for i, b := range rbuf { + if byte(i) != b { + t.Errorf("%s: bad byte at offset %d", name, i) + break + } + } + } + } + } + } +} + +func TestControl(t *testing.T) { + const message = "this is a ping/pong messsage" + for _, isServer := range []bool{true, false} { + for _, isWriteControl := range []bool{true, false} { + name := fmt.Sprintf("s:%v, wc:%v", isServer, isWriteControl) + var connBuf bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) + rc := newConn(fakeNetConn{Reader: &connBuf, Writer: nil}, !isServer, 1024, 1024) + if isWriteControl { + wc.WriteControl(PongMessage, []byte(message), time.Now().Add(time.Second)) + } else { + w, err := wc.NextWriter(PongMessage) + if err != nil { + t.Errorf("%s: wc.NextWriter() returned %v", name, err) + continue + } + if _, err := w.Write([]byte(message)); err != nil { + t.Errorf("%s: w.Write() returned %v", name, err) + continue + } + if err := w.Close(); err != nil { + t.Errorf("%s: w.Close() returned %v", name, err) + continue + } + var actualMessage string + rc.SetPongHandler(func(s string) error { actualMessage = s; return nil }) + rc.NextReader() + if actualMessage != message { + t.Errorf("%s: pong=%q, want %q", name, actualMessage, message) + continue + } + } + } + } +} + +func TestCloseBeforeFinalFrame(t *testing.T) { + const bufSize = 512 + + var b1, b2 bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) + rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) + + w, _ := wc.NextWriter(BinaryMessage) + w.Write(make([]byte, 
// TestEOFBeforeFinalFrame verifies that when the peer's byte stream ends
// in the middle of a fragmented message (no final frame, no close frame),
// both the in-progress reader and a subsequent NextReader call report
// errUnexpectedEOF.
func TestEOFBeforeFinalFrame(t *testing.T) {
	const bufSize = 512

	var b1, b2 bytes.Buffer
	wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize)
	rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024)

	// Writing more than bufSize flushes a non-final frame; the writer is
	// never closed, so no final frame ever reaches the reader.
	w, _ := wc.NextWriter(BinaryMessage)
	w.Write(make([]byte, bufSize+bufSize/2))

	op, r, err := rc.NextReader()
	if op != BinaryMessage || err != nil {
		t.Fatalf("NextReader() returned %d, %v", op, err)
	}
	_, err = io.Copy(ioutil.Discard, r)
	if err != errUnexpectedEOF {
		t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF)
	}
	_, _, err = rc.NextReader()
	if err != errUnexpectedEOF {
		t.Fatalf("NextReader() returned %v, want %v", err, errUnexpectedEOF)
	}
}
+ wc.WriteMessage(BinaryMessage, message[:readLimit+1]) + + op, _, err := rc.NextReader() + if op != BinaryMessage || err != nil { + t.Fatalf("1: NextReader() returned %d, %v", op, err) + } + op, r, err := rc.NextReader() + if op != BinaryMessage || err != nil { + t.Fatalf("2: NextReader() returned %d, %v", op, err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != ErrReadLimit { + t.Fatalf("io.Copy() returned %v", err) + } +} + +func TestUnderlyingConn(t *testing.T) { + var b1, b2 bytes.Buffer + fc := fakeNetConn{Reader: &b1, Writer: &b2} + c := newConn(fc, true, 1024, 1024) + ul := c.UnderlyingConn() + if ul != fc { + t.Fatalf("Underlying conn is not what it should be.") + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go b/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..0d2bd912b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application uses +// the Upgrade function from an Upgrader object with a HTTP request handler +// to get a pointer to a Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection WriteMessage and ReadMessages methods to send and +// receive messages as a slice of bytes. 
This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// return +// } +// if err = conn.WriteMessage(messageType, p); err != nil { +// return err +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// snippet shows how to echo messages using the NextWriter and NextReader +// methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. 
Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received ping and pong messages by invoking a callback +// function set with SetPingHandler and SetPongHandler methods. These callback +// functions can be invoked from the ReadMessage method, the NextReader method +// or from a call to the data message reader returned from NextReader. +// +// Connections handle received close messages by returning an error from the +// ReadMessage method, the NextReader method or from a call to the data message +// reader returned from NextReader. +// +// Concurrency +// +// Connections do not support concurrent calls to the write methods +// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read +// methods methods (NextReader, SetReadDeadline, ReadMessage). Connections do +// support a concurrent reader and writer. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Read is Required +// +// The application must read the connection to process ping and close messages +// sent from the peer. If the application is not otherwise interested in +// messages from the peer, then the application should start a goroutine to read +// and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. 
+// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and not equal to the +// Host request header. +// +// An application can allow connections from any origin by specifying a +// function that always returns true: +// +// var upgrader = websocket.Upgrader{ +// CheckOrigin: func(r *http.Request) bool { return true }, +// } +// +// The deprecated Upgrade function does not enforce an origin policy. It's the +// application's responsibility to check the Origin header before calling +// Upgrade. +package websocket diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json new file mode 100644 index 000000000..27d5a5b14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json @@ -0,0 +1,14 @@ + +{ + "options": {"failByDrop": false}, + "outdir": "./reports/clients", + "servers": [ + {"agent": "ReadAllWriteMessage", "url": "ws://localhost:9000/m", "options": {"version": 18}}, + {"agent": "ReadAllWrite", "url": "ws://localhost:9000/r", "options": {"version": 18}}, + {"agent": "CopyFull", "url": "ws://localhost:9000/f", "options": {"version": 18}}, + {"agent": "CopyWriterOnly", "url": "ws://localhost:9000/c", "options": {"version": 18}} + ], + "cases": ["*"], + "exclude-cases": [], + "exclude-agent-cases": {} +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go new file mode 100644 index 000000000..d96ac84db --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go @@ -0,0 +1,246 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command server is a test server for the Autobahn WebSockets Test Suite. +package main + +import ( + "errors" + "flag" + "github.com/gorilla/websocket" + "io" + "log" + "net/http" + "time" + "unicode/utf8" +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 4096, + WriteBufferSize: 4096, + CheckOrigin: func(r *http.Request) bool { + return true + }, +} + +// echoCopy echoes messages from the client using io.Copy. +func echoCopy(w http.ResponseWriter, r *http.Request, writerOnly bool) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Upgrade:", err) + return + } + defer conn.Close() + for { + mt, r, err := conn.NextReader() + if err != nil { + if err != io.EOF { + log.Println("NextReader:", err) + } + return + } + if mt == websocket.TextMessage { + r = &validator{r: r} + } + w, err := conn.NextWriter(mt) + if err != nil { + log.Println("NextWriter:", err) + return + } + if mt == websocket.TextMessage { + r = &validator{r: r} + } + if writerOnly { + _, err = io.Copy(struct{ io.Writer }{w}, r) + } else { + _, err = io.Copy(w, r) + } + if err != nil { + if err == errInvalidUTF8 { + conn.WriteControl(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), + time.Time{}) + } + log.Println("Copy:", err) + return + } + err = w.Close() + if err != nil { + log.Println("Close:", err) + return + } + } +} + +func echoCopyWriterOnly(w http.ResponseWriter, r *http.Request) { + echoCopy(w, r, true) +} + +func echoCopyFull(w http.ResponseWriter, r *http.Request) { + echoCopy(w, r, false) +} + +// echoReadAll echoes messages from the client by reading the entire message +// with ioutil.ReadAll. 
+func echoReadAll(w http.ResponseWriter, r *http.Request, writeMessage bool) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Upgrade:", err) + return + } + defer conn.Close() + for { + mt, b, err := conn.ReadMessage() + if err != nil { + if err != io.EOF { + log.Println("NextReader:", err) + } + return + } + if mt == websocket.TextMessage { + if !utf8.Valid(b) { + conn.WriteControl(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), + time.Time{}) + log.Println("ReadAll: invalid utf8") + } + } + if writeMessage { + err = conn.WriteMessage(mt, b) + if err != nil { + log.Println("WriteMessage:", err) + } + } else { + w, err := conn.NextWriter(mt) + if err != nil { + log.Println("NextWriter:", err) + return + } + if _, err := w.Write(b); err != nil { + log.Println("Writer:", err) + return + } + if err := w.Close(); err != nil { + log.Println("Close:", err) + return + } + } + } +} + +func echoReadAllWriter(w http.ResponseWriter, r *http.Request) { + echoReadAll(w, r, false) +} + +func echoReadAllWriteMessage(w http.ResponseWriter, r *http.Request) { + echoReadAll(w, r, true) +} + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found.", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + io.WriteString(w, "Echo Server") +} + +var addr = flag.String("addr", ":9000", "http service address") + +func main() { + flag.Parse() + http.HandleFunc("/", serveHome) + http.HandleFunc("/c", echoCopyWriterOnly) + http.HandleFunc("/f", echoCopyFull) + http.HandleFunc("/r", echoReadAllWriter) + http.HandleFunc("/m", echoReadAllWriteMessage) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} + +type validator struct { + state int + x rune + r io.Reader +} + +var errInvalidUTF8 = 
errors.New("invalid utf8") + +func (r *validator) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + state := r.state + x := r.x + for _, b := range p[:n] { + state, x = decode(state, x, b) + if state == utf8Reject { + break + } + } + r.state = state + r.x = x + if state == utf8Reject || (err == io.EOF && state != utf8Accept) { + return n, errInvalidUTF8 + } + return n, err +} + +// UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +// +// Copyright (c) 2008-2009 Bjoern Hoehrmann +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +// IN THE SOFTWARE. 
+var utf8d = [...]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7f + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9f + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // a0..bf + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0..df + 0xa, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // e0..ef + 0xb, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // f0..ff + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // s7..s8 +} + +const ( + utf8Accept = 0 + utf8Reject = 1 +) + +func decode(state int, x rune, b byte) (int, rune) { + t := utf8d[b] + if state != utf8Accept { + x = rune(b&0x3f) | (x << 6) + } else { + x = rune((0xff >> t) & b) + } + state = int(utf8d[256+state*16+int(t)]) + return state, x +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go new file mode 100644 index 000000000..7cc0496c3 --- /dev/null 
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "github.com/gorilla/websocket" + "log" + "net/http" + "time" +) + +const ( + // Time allowed to write a message to the peer. + writeWait = 10 * time.Second + + // Time allowed to read the next pong message from the peer. + pongWait = 60 * time.Second + + // Send pings to peer with this period. Must be less than pongWait. + pingPeriod = (pongWait * 9) / 10 + + // Maximum message size allowed from peer. + maxMessageSize = 512 +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, +} + +// connection is an middleman between the websocket connection and the hub. +type connection struct { + // The websocket connection. + ws *websocket.Conn + + // Buffered channel of outbound messages. + send chan []byte +} + +// readPump pumps messages from the websocket connection to the hub. +func (c *connection) readPump() { + defer func() { + h.unregister <- c + c.ws.Close() + }() + c.ws.SetReadLimit(maxMessageSize) + c.ws.SetReadDeadline(time.Now().Add(pongWait)) + c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) + for { + _, message, err := c.ws.ReadMessage() + if err != nil { + break + } + h.broadcast <- message + } +} + +// write writes a message with the given message type and payload. +func (c *connection) write(mt int, payload []byte) error { + c.ws.SetWriteDeadline(time.Now().Add(writeWait)) + return c.ws.WriteMessage(mt, payload) +} + +// writePump pumps messages from the hub to the websocket connection. 
+func (c *connection) writePump() { + ticker := time.NewTicker(pingPeriod) + defer func() { + ticker.Stop() + c.ws.Close() + }() + for { + select { + case message, ok := <-c.send: + if !ok { + c.write(websocket.CloseMessage, []byte{}) + return + } + if err := c.write(websocket.TextMessage, message); err != nil { + return + } + case <-ticker.C: + if err := c.write(websocket.PingMessage, []byte{}); err != nil { + return + } + } + } +} + +// serverWs handles websocket requests from the peer. +func serveWs(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println(err) + return + } + c := &connection{send: make(chan []byte, 256), ws: ws} + h.register <- c + go c.writePump() + c.readPump() +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html new file mode 100644 index 000000000..29599225c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html @@ -0,0 +1,92 @@ + + + +Chat Example + + + + + +
+
+ + + + + diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go new file mode 100644 index 000000000..449ba753d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go @@ -0,0 +1,51 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// hub maintains the set of active connections and broadcasts messages to the +// connections. +type hub struct { + // Registered connections. + connections map[*connection]bool + + // Inbound messages from the connections. + broadcast chan []byte + + // Register requests from the connections. + register chan *connection + + // Unregister requests from connections. + unregister chan *connection +} + +var h = hub{ + broadcast: make(chan []byte), + register: make(chan *connection), + unregister: make(chan *connection), + connections: make(map[*connection]bool), +} + +func (h *hub) run() { + for { + select { + case c := <-h.register: + h.connections[c] = true + case c := <-h.unregister: + if _, ok := h.connections[c]; ok { + delete(h.connections, c) + close(c.send) + } + case m := <-h.broadcast: + for c := range h.connections { + select { + case c.send <- m: + default: + close(c.send) + delete(h.connections, c) + } + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go new file mode 100644 index 000000000..3c4448d72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go @@ -0,0 +1,39 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "flag" + "log" + "net/http" + "text/template" +) + +var addr = flag.String("addr", ":8080", "http service address") +var homeTempl = template.Must(template.ParseFiles("home.html")) + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + homeTempl.Execute(w, r.Host) +} + +func main() { + flag.Parse() + go h.run() + http.HandleFunc("/", serveHome) + http.HandleFunc("/ws", serveWs) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go new file mode 100644 index 000000000..a2c7b85fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go @@ -0,0 +1,193 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "io/ioutil" + "log" + "net/http" + "os" + "strconv" + "text/template" + "time" + + "github.com/gorilla/websocket" +) + +const ( + // Time allowed to write the file to the client. + writeWait = 10 * time.Second + + // Time allowed to read the next pong message from the client. + pongWait = 60 * time.Second + + // Send pings to client with this period. Must be less than pongWait. + pingPeriod = (pongWait * 9) / 10 + + // Poll file for changes with this period. 
+ filePeriod = 10 * time.Second +) + +var ( + addr = flag.String("addr", ":8080", "http service address") + homeTempl = template.Must(template.New("").Parse(homeHTML)) + filename string + upgrader = websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + } +) + +func readFileIfModified(lastMod time.Time) ([]byte, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return nil, lastMod, err + } + if !fi.ModTime().After(lastMod) { + return nil, lastMod, nil + } + p, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fi.ModTime(), err + } + return p, fi.ModTime(), nil +} + +func reader(ws *websocket.Conn) { + defer ws.Close() + ws.SetReadLimit(512) + ws.SetReadDeadline(time.Now().Add(pongWait)) + ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) + for { + _, _, err := ws.ReadMessage() + if err != nil { + break + } + } +} + +func writer(ws *websocket.Conn, lastMod time.Time) { + lastError := "" + pingTicker := time.NewTicker(pingPeriod) + fileTicker := time.NewTicker(filePeriod) + defer func() { + pingTicker.Stop() + fileTicker.Stop() + ws.Close() + }() + for { + select { + case <-fileTicker.C: + var p []byte + var err error + + p, lastMod, err = readFileIfModified(lastMod) + + if err != nil { + if s := err.Error(); s != lastError { + lastError = s + p = []byte(lastError) + } + } else { + lastError = "" + } + + if p != nil { + ws.SetWriteDeadline(time.Now().Add(writeWait)) + if err := ws.WriteMessage(websocket.TextMessage, p); err != nil { + return + } + } + case <-pingTicker.C: + ws.SetWriteDeadline(time.Now().Add(writeWait)) + if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil { + return + } + } + } +} + +func serveWs(w http.ResponseWriter, r *http.Request) { + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + if _, ok := err.(websocket.HandshakeError); !ok { + log.Println(err) + } + return + } + + var lastMod time.Time + if n, err := 
strconv.ParseInt(r.FormValue("lastMod"), 16, 64); err != nil { + lastMod = time.Unix(0, n) + } + + go writer(ws, lastMod) + reader(ws) +} + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + p, lastMod, err := readFileIfModified(time.Time{}) + if err != nil { + p = []byte(err.Error()) + lastMod = time.Unix(0, 0) + } + var v = struct { + Host string + Data string + LastMod string + }{ + r.Host, + string(p), + strconv.FormatInt(lastMod.UnixNano(), 16), + } + homeTempl.Execute(w, &v) +} + +func main() { + flag.Parse() + if flag.NArg() != 1 { + log.Fatal("filename not specified") + } + filename = flag.Args()[0] + http.HandleFunc("/", serveHome) + http.HandleFunc("/ws", serveWs) + if err := http.ListenAndServe(*addr, nil); err != nil { + log.Fatal(err) + } +} + +const homeHTML = ` + + + WebSocket Example + + +
{{.Data}}
+ + + +` diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/json.go b/Godeps/_workspace/src/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..18e62f225 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/json.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON is deprecated, use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON is deprecated, use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // Decode returns io.EOF when the message is empty or all whitespace. + // Convert to io.ErrUnexpectedEOF so that application can distinguish + // between an error reading the JSON value and the connection closing. 
+ err = io.ErrUnexpectedEOF + } + return err +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go new file mode 100644 index 000000000..1b7a5ec8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go @@ -0,0 +1,119 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "encoding/json" + "io" + "reflect" + "testing" +) + +func TestJSON(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var actual, expect struct { + A int + B string + } + expect.A = 1 + expect.B = "hello" + + if err := wc.WriteJSON(&expect); err != nil { + t.Fatal("write", err) + } + + if err := rc.ReadJSON(&actual); err != nil { + t.Fatal("read", err) + } + + if !reflect.DeepEqual(&actual, &expect) { + t.Fatal("equal", actual, expect) + } +} + +func TestPartialJsonRead(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var v struct { + A int + B string + } + v.A = 1 + v.B = "hello" + + messageCount := 0 + + // Partial JSON values. + + data, err := json.Marshal(v) + if err != nil { + t.Fatal(err) + } + for i := len(data) - 1; i >= 0; i-- { + if err := wc.WriteMessage(TextMessage, data[:i]); err != nil { + t.Fatal(err) + } + messageCount++ + } + + // Whitespace. + + if err := wc.WriteMessage(TextMessage, []byte(" ")); err != nil { + t.Fatal(err) + } + messageCount++ + + // Close. 
+ + if err := wc.WriteMessage(CloseMessage, FormatCloseMessage(CloseNormalClosure, "")); err != nil { + t.Fatal(err) + } + + for i := 0; i < messageCount; i++ { + err := rc.ReadJSON(&v) + if err != io.ErrUnexpectedEOF { + t.Error("read", i, err) + } + } + + err = rc.ReadJSON(&v) + if err != io.EOF { + t.Error("final", err) + } +} + +func TestDeprecatedJSON(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var actual, expect struct { + A int + B string + } + expect.A = 1 + expect.B = "hello" + + if err := WriteJSON(wc, &expect); err != nil { + t.Fatal("write", err) + } + + if err := ReadJSON(rc, &actual); err != nil { + t.Fatal("read", err) + } + + if !reflect.DeepEqual(&actual, &expect) { + t.Fatal("equal", actual, expect) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/server.go b/Godeps/_workspace/src/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..e56a00493 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/server.go @@ -0,0 +1,247 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a default value of 4096 is used. 
The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, the host in the Origin header must not be set or + // must match the host of the request. + CheckOrigin func(r *http.Request) bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. 
+// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + var ( + netConn net.Conn + br *bufio.Reader + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var rw *bufio.ReadWriter + netConn, rw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + br = rw.Reader + + if br.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) + c.subprotocol = subprotocol + + p := c.writeBuf[:0] + p = append(p, 
"HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// This function is deprecated, use websocket.Upgrader instead. +// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. 
Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go new file mode 100644 index 000000000..ead0776af --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "net/http" + "reflect" + "testing" +) + +var subprotocolTests = []struct { + h string + protocols []string +}{ + {"", nil}, + {"foo", []string{"foo"}}, + {"foo,bar", []string{"foo", "bar"}}, + {"foo, bar", []string{"foo", "bar"}}, + {" foo, bar", []string{"foo", "bar"}}, + {" foo, bar ", []string{"foo", "bar"}}, +} + +func TestSubprotocols(t *testing.T) { + for _, st := range subprotocolTests { + r := http.Request{Header: http.Header{"Sec-Websocket-Protocol": {st.h}}} + protocols := Subprotocols(&r) + if !reflect.DeepEqual(st.protocols, protocols) { + t.Errorf("SubProtocols(%q) returned %#v, want %#v", st.h, protocols, st.protocols) + } + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/util.go b/Godeps/_workspace/src/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..ffdc265ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/util.go @@ -0,0 +1,44 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" +) + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. 
+func tokenListContainsValue(header http.Header, name string, value string) bool { + for _, v := range header[name] { + for _, s := range strings.Split(v, ",") { + if strings.EqualFold(value, strings.TrimSpace(s)) { + return true + } + } + } + return false +} + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go new file mode 100644 index 000000000..91f70ceb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go @@ -0,0 +1,34 @@ +// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "net/http" + "testing" +) + +var tokenListContainsValueTests = []struct { + value string + ok bool +}{ + {"WebSocket", true}, + {"WEBSOCKET", true}, + {"websocket", true}, + {"websockets", false}, + {"x websocket", false}, + {"websocket x", false}, + {"other,websocket,more", true}, + {"other, websocket, more", true}, +} + +func TestTokenListContainsValue(t *testing.T) { + for _, tt := range tokenListContainsValueTests { + h := http.Header{"Upgrade": {tt.value}} + ok := tokenListContainsValue(h, "Upgrade", "websocket") + if ok != tt.ok { + t.Errorf("tokenListContainsValue(h, n, %q) = %v, want %v", tt.value, ok, tt.ok) + } + } +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/CHANGELOG.md b/Godeps/_workspace/src/github.com/huandu/facebook/CHANGELOG.md new file mode 100644 index 000000000..d1c14a215 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/CHANGELOG.md @@ -0,0 +1,47 @@ +# Change Log # + +## v1.5.2 ## + +* `[FIX]` [#32](https://github.com/huandu/facebook/pull/32) BatchApi/Batch returns facebook error when access token is not valid. + +## v1.5.1 ## + +* `[FIX]` [#31](https://github.com/huandu/facebook/pull/31) When `/oauth/access_token` returns a query string instead of json, this package can correctly handle it. + +## v1.5.0 ## + +* `[NEW]` [#28](https://github.com/huandu/facebook/pull/28) Support debug mode introduced by facebook graph API v2.3. +* `[FIX]` Removed all test cases depending on facebook graph API v1.0. + +## v1.4.1 ## + +* `[NEW]` [#27](https://github.com/huandu/facebook/pull/27) Timestamp value in Graph API response can be decoded as a `time.Time` value now. Thanks, [@Lazyshot](https://github.com/Lazyshot). + +## v1.4.0 ## + +* `[FIX]` [#23](https://github.com/huandu/facebook/issues/24) Algorithm change: Camel case string to underscore string supports abbreviation + +Fix for [#23](https://github.com/huandu/facebook/issues/24) could be a breaking change. 
Camel case string `HTTPServer` will be converted to `http_server` instead of `h_t_t_p_server`. See issue description for detail. + +## v1.3.0 ## + +* `[NEW]` [#22](https://github.com/huandu/facebook/issues/22) Add a new helper struct `BatchResult` to hold batch request responses. + +## v1.2.0 ## + +* `[NEW]` [#20](https://github.com/huandu/facebook/issues/20) Add Decode functionality for paging results. Thanks, [@cbroglie](https://github.com/cbroglie). +* `[FIX]` [#21](https://github.com/huandu/facebook/issues/21) `Session#Inspect` cannot return error if access token is invalid. + +Fix for [#21](https://github.com/huandu/facebook/issues/21) will result a possible breaking change in `Session#Inspect`. It was return whole result returned by facebook inspect api. Now it only return its "data" sub-tree. As facebook puts everything including error message in "data" sub-tree, I believe it's reasonable to make this change. + +## v1.1.0 ## + +* `[FIX]` [#19](https://github.com/huandu/facebook/issues/19) Any valid int64 number larger than 2^53 or smaller than -2^53 can be correctly decoded without precision lost. + +Fix for [#19](https://github.com/huandu/facebook/issues/19) will result a possible breaking change in `Result#Get` and `Result#GetField`. If a JSON field is a number, these two functions will return json.Number instead of float64. + +The fix also introduces a side effect in `Result#Decode` and `Result#DecodeField`. A number field (`int*` and `float*`) can be decoded to a string. It was not allowed in previous version. + +## v1.0.0 ## + +Initial tag. Library is stable enough for all features mentioned in README.md. diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/huandu/facebook/CONTRIBUTING.md new file mode 100644 index 000000000..c001d2511 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/CONTRIBUTING.md @@ -0,0 +1,7 @@ +Thanks for contributing this project! 
+ +Please don't forget to use `gofmt` to make your code look good. + +Here is the command I use. Please always use the same parameters. + + go fmt diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/LICENSE b/Godeps/_workspace/src/github.com/huandu/facebook/LICENSE new file mode 100644 index 000000000..9569215e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 - 2015 Huan Du + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/api.go b/Godeps/_workspace/src/github.com/huandu/facebook/api.go new file mode 100644 index 000000000..57945d26e --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/api.go @@ -0,0 +1,180 @@ +// A facebook graph api client in go. 
+// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +// This is a Go library fully supports Facebook Graph API (both 1.0 and 2.x) with +// file upload, batch request, FQL and multi-FQL. It can be used in Google App Engine. +// +// Library design is highly influenced by facebook official PHP/JS SDK. +// If you have experience with PHP/JS SDK, you may feel quite familiar with it. +// +// Go to project home page to see samples. Link: https://github.com/huandu/facebook +// +// This library doesn't implement any deprecated old RESTful API. And it won't. +package facebook + +import ( + "net/http" +) + +var ( + // Default facebook api version. + // It can be any valid version string (e.g. "v2.3") or empty. + // + // See https://developers.facebook.com/docs/apps/versions for details. + Version string + + // Set app level debug mode. + // After setting DebugMode, all newly created session will use the mode + // to communicate with graph API. + // + // See https://developers.facebook.com/docs/graph-api/using-graph-api/v2.3#debugging + Debug DebugMode +) + +// Makes a facebook graph api call with default session. +// +// Method can be GET, POST, DELETE or PUT. +// +// Params represents query strings in this call. +// Keys and values in params will be encoded for URL automatically. So there is +// no need to encode keys or values in params manually. Params can be nil. +// +// If you want to get +// https://graph.facebook.com/huandu?fields=name,username +// Api should be called as following +// Api("/huandu", GET, Params{"fields": "name,username"}) +// or in a simplified way +// Get("/huandu", Params{"fields": "name,username"}) +// +// Api is a wrapper of Session.Api(). It's designed for graph api that doesn't require +// app id, app secret and access token. It can be called in multiple goroutines. 
+// +// If app id, app secret or access token is required in graph api, caller should +// create a new facebook session through App instance instead. +func Api(path string, method Method, params Params) (Result, error) { + return defaultSession.Api(path, method, params) +} + +// Get is a short hand of Api(path, GET, params). +func Get(path string, params Params) (Result, error) { + return Api(path, GET, params) +} + +// Post is a short hand of Api(path, POST, params). +func Post(path string, params Params) (Result, error) { + return Api(path, POST, params) +} + +// Delete is a short hand of Api(path, DELETE, params). +func Delete(path string, params Params) (Result, error) { + return Api(path, DELETE, params) +} + +// Put is a short hand of Api(path, PUT, params). +func Put(path string, params Params) (Result, error) { + return Api(path, PUT, params) +} + +// Makes a batch facebook graph api call with default session. +// +// BatchApi supports most kinds of batch calls defines in facebook batch api document, +// except uploading binary data. Use Batch to do so. +// +// Note: API response is stored in "body" field of a Result. +// results, _ := BatchApi(accessToken, Params{...}, Params{...}) +// +// // Use first batch api response. +// var res1 *BatchResult +// var err error +// res1, err = results[0].Batch() +// +// if err != nil { +// // this is not a valid batch api response. +// } +// +// // Use BatchResult#Result to get response body content as Result. +// res := res1.Result +// +// Facebook document: https://developers.facebook.com/docs/graph-api/making-multiple-requests +func BatchApi(accessToken string, params ...Params) ([]Result, error) { + return Batch(Params{"access_token": accessToken}, params...) +} + +// Makes a batch facebook graph api call with default session. +// Batch is designed for more advanced usage including uploading binary files. 
+// +// An uploading files sample +// // equivalent to following curl command (borrowed from facebook docs) +// // curl \ +// // -F 'access_token=…' \ +// // -F 'batch=[{"method":"POST","relative_url":"me/photos","body":"message=My cat photo","attached_files":"file1"},{"method":"POST","relative_url":"me/photos","body":"message=My dog photo","attached_files":"file2"},]' \ +// // -F 'file1=@cat.gif' \ +// // -F 'file2=@dog.jpg' \ +// // https://graph.facebook.com +// Batch(Params{ +// "access_token": "the-access-token", +// "file1": File("cat.gif"), +// "file2": File("dog.jpg"), +// }, Params{ +// "method": "POST", +// "relative_url": "me/photos", +// "body": "message=My cat photo", +// "attached_files": "file1", +// }, Params{ +// "method": "POST", +// "relative_url": "me/photos", +// "body": "message=My dog photo", +// "attached_files": "file2", +// }) +// +// Facebook document: https://developers.facebook.com/docs/graph-api/making-multiple-requests +func Batch(batchParams Params, params ...Params) ([]Result, error) { + return defaultSession.Batch(batchParams, params...) +} + +// Makes a FQL query with default session. +// Returns a slice of Result. If there is no query result, the result is nil. +// +// FQL can only make query without "access_token". For query requiring "access_token", create +// Session and call its FQL method. +// +// Facebook document: https://developers.facebook.com/docs/technical-guides/fql#query +func FQL(query string) ([]Result, error) { + return defaultSession.FQL(query) +} + +// Makes a multi FQL query with default session. +// Returns a parsed Result. The key is the multi query key, and the value is the query result. +// +// MultiFQL can only make query without "access_token". For query requiring "access_token", create +// Session and call its MultiFQL method. +// +// See Session.MultiFQL document for samples. 
+// +// Facebook document: https://developers.facebook.com/docs/technical-guides/fql#multi +func MultiFQL(queries Params) (Result, error) { + return defaultSession.MultiFQL(queries) +} + +// Makes an arbitrary HTTP request with default session. +// It expects server responses a facebook Graph API response. +// request, _ := http.NewRequest("https://graph.facebook.com/538744468", "GET", nil) +// res, err := Request(request) +// fmt.Println(res["gender"]) // get "male" +func Request(request *http.Request) (Result, error) { + return defaultSession.Request(request) +} + +// DefaultHttpClient returns the http client for default session. +func DefaultHttpClient() HttpClient { + return defaultSession.HttpClient +} + +// SetHttpClient updates the http client of default session. +func SetHttpClient(client HttpClient) { + defaultSession.HttpClient = client +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/app.go b/Godeps/_workspace/src/github.com/huandu/facebook/app.go new file mode 100644 index 000000000..d8787aa87 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/app.go @@ -0,0 +1,255 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/json" + "fmt" + "strings" +) + +// Creates a new App and sets app id and secret. +func New(appId, appSecret string) *App { + return &App{ + AppId: appId, + AppSecret: appSecret, + } +} + +// Gets application access token, useful for gathering public information about users and applications. +func (app *App) AppAccessToken() string { + return app.AppId + "|" + app.AppSecret +} + +// Parses signed request. 
+func (app *App) ParseSignedRequest(signedRequest string) (res Result, err error) { + strs := strings.SplitN(signedRequest, ".", 2) + + if len(strs) != 2 { + err = fmt.Errorf("invalid signed request format.") + return + } + + sig, e1 := decodeBase64URLEncodingString(strs[0]) + + if e1 != nil { + err = fmt.Errorf("cannot decode signed request sig. error is %v.", e1) + return + } + + payload, e2 := decodeBase64URLEncodingString(strs[1]) + + if e2 != nil { + err = fmt.Errorf("cannot decode signed request payload. error is %v.", e2) + return + } + + err = json.Unmarshal(payload, &res) + + if err != nil { + err = fmt.Errorf("signed request payload is not a valid json string. error is %v.", err) + return + } + + var hashMethod string + err = res.DecodeField("algorithm", &hashMethod) + + if err != nil { + err = fmt.Errorf("signed request payload doesn't contains a valid 'algorithm' field.") + return + } + + hashMethod = strings.ToUpper(hashMethod) + + if hashMethod != "HMAC-SHA256" { + err = fmt.Errorf("signed request payload uses an unknown HMAC method. expect 'HMAC-SHA256'. actual '%v'.", hashMethod) + return + } + + hash := hmac.New(sha256.New, []byte(app.AppSecret)) + hash.Write([]byte(strs[1])) // note: here uses the payload base64 string, not decoded bytes + expectedSig := hash.Sum(nil) + + if bytes.Compare(sig, expectedSig) != 0 { + err = fmt.Errorf("bad signed request signiture.") + return + } + + return +} + +// ParseCode redeems code for a valid access token. +// It's a shorthand call to ParseCodeInfo(code, ""). +// +// In facebook PHP SDK, there is a CSRF state to avoid attack. +// That state is not checked in this library. +// Caller is responsible to store and check state if possible. +func (app *App) ParseCode(code string) (token string, err error) { + token, _, _, err = app.ParseCodeInfo(code, "") + return +} + +// ParseCodeInfo redeems code for access token and returns extra information. +// The machineId is optional. 
+// +// See https://developers.facebook.com/docs/facebook-login/access-tokens#extending +func (app *App) ParseCodeInfo(code, machineId string) (token string, expires int, newMachineId string, err error) { + if code == "" { + err = fmt.Errorf("code is empty") + return + } + + var res Result + res, err = defaultSession.sendOauthRequest("/oauth/access_token", Params{ + "client_id": app.AppId, + "redirect_uri": app.RedirectUri, + "code": code, + }) + + if err != nil { + err = fmt.Errorf("cannot parse facebook response. error is %v.", err) + return + } + + err = res.DecodeField("access_token", &token) + + if err != nil { + return + } + + err = res.DecodeField("expires_in", &expires) + + if err != nil { + return + } + + if _, ok := res["machine_id"]; ok { + err = res.DecodeField("machine_id", &newMachineId) + } + + return +} + +// Exchange a short lived access token to a long lived access token. +// Return new access token and its expires time. +func (app *App) ExchangeToken(accessToken string) (token string, expires int, err error) { + if accessToken == "" { + err = fmt.Errorf("short lived accessToken is empty") + return + } + + var res Result + res, err = defaultSession.sendOauthRequest("/oauth/access_token", Params{ + "grant_type": "fb_exchange_token", + "client_id": app.AppId, + "client_secret": app.AppSecret, + "fb_exchange_token": accessToken, + }) + + if err != nil { + err = fmt.Errorf("cannot parse facebook response. error is %v.", err) + return + } + + err = res.DecodeField("access_token", &token) + + if err != nil { + return + } + + err = res.DecodeField("expires_in", &expires) + return +} + +// Get code from a long lived access token. +// Return the code retrieved from facebook. 
+func (app *App) GetCode(accessToken string) (code string, err error) { + if accessToken == "" { + err = fmt.Errorf("long lived accessToken is empty") + return + } + + var res Result + res, err = defaultSession.sendOauthRequest("/oauth/client_code", Params{ + "client_id": app.AppId, + "client_secret": app.AppSecret, + "redirect_uri": app.RedirectUri, + "access_token": accessToken, + }) + + if err != nil { + err = fmt.Errorf("cannot get code from facebook. error is %v.", err) + return + } + + err = res.DecodeField("code", &code) + return +} + +// Creates a session based on current App setting. +func (app *App) Session(accessToken string) *Session { + return &Session{ + accessToken: accessToken, + app: app, + enableAppsecretProof: app.EnableAppsecretProof, + } +} + +// Creates a session from a signed request. +// If signed request contains a code, it will automatically use this code +// to exchange a valid access token. +func (app *App) SessionFromSignedRequest(signedRequest string) (session *Session, err error) { + var res Result + + res, err = app.ParseSignedRequest(signedRequest) + + if err != nil { + return + } + + var id, token string + + res.DecodeField("user_id", &id) // it's ok without user id. + err = res.DecodeField("oauth_token", &token) + + if err == nil { + session = &Session{ + accessToken: token, + app: app, + id: id, + enableAppsecretProof: app.EnableAppsecretProof, + } + return + } + + // cannot get "oauth_token"? try to get "code". + err = res.DecodeField("code", &token) + + if err != nil { + // no code? no way to continue. + err = fmt.Errorf("cannot find 'oauth_token' and 'code'. 
no way to continue.") + return + } + + token, err = app.ParseCode(token) + + if err != nil { + return + } + + session = &Session{ + accessToken: token, + app: app, + id: id, + enableAppsecretProof: app.EnableAppsecretProof, + } + return +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/batch_result.go b/Godeps/_workspace/src/github.com/huandu/facebook/batch_result.go new file mode 100644 index 000000000..43a38358e --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/batch_result.go @@ -0,0 +1,52 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "encoding/json" + "net/http" +) + +type batchResultHeader struct { + Name string `facebook=",required"` + Value string `facebook=",required"` +} + +type batchResultData struct { + Code int `facebook=",required"` + Headers []batchResultHeader `facebook=",required"` + Body string `facebook=",required"` +} + +func newBatchResult(res Result) (*BatchResult, error) { + var data batchResultData + err := res.Decode(&data) + + if err != nil { + return nil, err + } + + result := &BatchResult{ + StatusCode: data.Code, + Header: http.Header{}, + Body: data.Body, + } + + err = json.Unmarshal([]byte(result.Body), &result.Result) + + if err != nil { + return nil, err + } + + // add headers to result. + for _, header := range data.Headers { + result.Header.Add(header.Name, header.Value) + } + + return result, nil +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/const.go b/Godeps/_workspace/src/github.com/huandu/facebook/const.go new file mode 100644 index 000000000..aa8be0de2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/const.go @@ -0,0 +1,74 @@ +// A facebook graph api client in go. 
+// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "encoding/json" + "reflect" + "regexp" + "time" +) + +// Facebook graph api methods. +const ( + GET Method = "GET" + POST Method = "POST" + DELETE Method = "DELETE" + PUT Method = "PUT" +) + +const ( + ERROR_CODE_UNKNOWN = -1 // unknown facebook graph api error code. + + _MIME_FORM_URLENCODED = "application/x-www-form-urlencoded" +) + +// Graph API debug mode values. +const ( + DEBUG_OFF DebugMode = "" // turn off debug. + + DEBUG_ALL DebugMode = "all" + DEBUG_INFO DebugMode = "info" + DEBUG_WARNING DebugMode = "warning" +) + +const ( + debugInfoKey = "__debug__" + debugProtoKey = "__proto__" + debugHeaderKey = "__header__" + + facebookApiVersionHeader = "facebook-api-version" + facebookDebugHeader = "x-fb-debug" + facebookRevHeader = "x-fb-rev" +) + +var ( + // Maps aliases to Facebook domains. + // Copied from Facebook PHP SDK. + domainMap = map[string]string{ + "api": "https://api.facebook.com/", + "api_video": "https://api-video.facebook.com/", + "api_read": "https://api-read.facebook.com/", + "graph": "https://graph.facebook.com/", + "graph_video": "https://graph-video.facebook.com/", + "www": "https://www.facebook.com/", + } + + // checks whether it's a video post. + regexpIsVideoPost = regexp.MustCompile(`/^(\/)(.+)(\/)(videos)$/`) + + // default facebook session. 
+ defaultSession = &Session{} + + typeOfPointerToBinaryData = reflect.TypeOf(&binaryData{}) + typeOfPointerToBinaryFile = reflect.TypeOf(&binaryFile{}) + typeOfJSONNumber = reflect.TypeOf(json.Number("")) + typeOfTime = reflect.TypeOf(time.Time{}) + + facebookSuccessJsonBytes = []byte("true") +) diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/facebook_test.go b/Godeps/_workspace/src/github.com/huandu/facebook/facebook_test.go new file mode 100644 index 000000000..154881f38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/facebook_test.go @@ -0,0 +1,1469 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "testing" + "time" +) + +const ( + FB_TEST_APP_ID = "169186383097898" + FB_TEST_APP_SECRET = "b2e4262c306caa3c7f5215d2d099b319" + + // remeber to change it to a valid token to run test + //FB_TEST_VALID_ACCESS_TOKEN = "CAACZA38ZAD8CoBAFCaVgLBNdz0RrH45yUBUA95exI1FY5i4mZBY5iULfM3YEpS53nP6eSF4cf3nmoiePHvMkdSZApkxu1heAupW7OE8tmiySRZAYkZBZBvhveCZCgPaJlFovlI0ZAhWdWTLxxmJaZCKDG0B8n9VGEvcN3zoS1AHjokSz4aNos39xthp7XtAz9X3NRvp1qU4UTOlxK8IJOC1ApAMmvcEE0kWvgZD" + FB_TEST_VALID_ACCESS_TOKEN = "" + + // remember to change it to a valid signed request to run test + //FB_TEST_VALID_SIGNED_REQUEST = "ZAxP-ILRQBOwKKxCBMNlGmVraiowV7WFNg761OYBNGc.eyJhbGdvcml0aG0iOiJITUFDLVNIQTI1NiIsImV4cGlyZXMiOjEzNDM0OTg0MDAsImlzc3VlZF9hdCI6MTM0MzQ5MzI2NSwib2F1dGhfdG9rZW4iOiJBQUFDWkEzOFpBRDhDb0JBRFpCcmZ5TFpDanBNUVczdThVTWZmRldSWkNpZGw5Tkx4a1BsY2tTcXZaQnpzTW9OWkF2bVk2RUd2NG1hUUFaQ0t2VlpBWkJ5VXA5a0FCU2x6THFJejlvZTdOdHBzdzhyQVpEWkQiLCJ1c2VyIjp7ImNvdW50cnkiOiJ1cyIsImxvY2FsZSI6ImVuX1VTIiwiYWdlIjp7Im1pbiI6MjF9fSwidXNlcl9pZCI6IjUzODc0NDQ2OCJ9" + FB_TEST_VALID_SIGNED_REQUEST = "" + + // test binary file base64 value + 
FB_TEST_BINARY_JPG_FILE = "/9j/4AAQSkZJRgABAQEASABIAAD/4gv4SUNDX1BST0ZJTEUAAQEAAAvoAAAAAAIAAABtbnRy" + + "UkdCIFhZWiAH2QADABsAFQAkAB9hY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAA" + + "9tYAAQAAAADTLQAAAAAp+D3er/JVrnhC+uTKgzkNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + "AAAAAAAAABBkZXNjAAABRAAAAHliWFlaAAABwAAAABRiVFJDAAAB1AAACAxkbWRkAAAJ4AAA" + + "AIhnWFlaAAAKaAAAABRnVFJDAAAB1AAACAxsdW1pAAAKfAAAABRtZWFzAAAKkAAAACRia3B0" + + "AAAKtAAAABRyWFlaAAAKyAAAABRyVFJDAAAB1AAACAx0ZWNoAAAK3AAAAAx2dWVkAAAK6AAA" + + "AId3dHB0AAALcAAAABRjcHJ0AAALhAAAADdjaGFkAAALvAAAACxkZXNjAAAAAAAAAB9zUkdC" + + "IElFQzYxOTY2LTItMSBibGFjayBzY2FsZWQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + "WFlaIAAAAAAAACSgAAAPhAAAts9jdXJ2AAAAAAAABAAAAAAFAAoADwAUABkAHgAjACgALQAy" + + "ADcAOwBAAEUASgBPAFQAWQBeAGMAaABtAHIAdwB8AIEAhgCLAJAAlQCaAJ8ApACpAK4AsgC3" + + "ALwAwQDGAMsA0ADVANsA4ADlAOsA8AD2APsBAQEHAQ0BEwEZAR8BJQErATIBOAE+AUUBTAFS" + + "AVkBYAFnAW4BdQF8AYMBiwGSAZoBoQGpAbEBuQHBAckB0QHZAeEB6QHyAfoCAwIMAhQCHQIm" + + "Ai8COAJBAksCVAJdAmcCcQJ6AoQCjgKYAqICrAK2AsECywLVAuAC6wL1AwADCwMWAyEDLQM4" + + "A0MDTwNaA2YDcgN+A4oDlgOiA64DugPHA9MD4APsA/kEBgQTBCAELQQ7BEgEVQRjBHEEfgSM" + + "BJoEqAS2BMQE0wThBPAE/gUNBRwFKwU6BUkFWAVnBXcFhgWWBaYFtQXFBdUF5QX2BgYGFgYn" + + "BjcGSAZZBmoGewaMBp0GrwbABtEG4wb1BwcHGQcrBz0HTwdhB3QHhgeZB6wHvwfSB+UH+AgL" + + "CB8IMghGCFoIbgiCCJYIqgi+CNII5wj7CRAJJQk6CU8JZAl5CY8JpAm6Cc8J5Qn7ChEKJwo9" + + "ClQKagqBCpgKrgrFCtwK8wsLCyILOQtRC2kLgAuYC7ALyAvhC/kMEgwqDEMMXAx1DI4MpwzA" + + "DNkM8w0NDSYNQA1aDXQNjg2pDcMN3g34DhMOLg5JDmQOfw6bDrYO0g7uDwkPJQ9BD14Peg+W" + + "D7MPzw/sEAkQJhBDEGEQfhCbELkQ1xD1ERMRMRFPEW0RjBGqEckR6BIHEiYSRRJkEoQSoxLD" + + "EuMTAxMjE0MTYxODE6QTxRPlFAYUJxRJFGoUixStFM4U8BUSFTQVVhV4FZsVvRXgFgMWJhZJ" + + "FmwWjxayFtYW+hcdF0EXZReJF64X0hf3GBsYQBhlGIoYrxjVGPoZIBlFGWsZkRm3Gd0aBBoq" + + "GlEadxqeGsUa7BsUGzsbYxuKG7Ib2hwCHCocUhx7HKMczBz1HR4dRx1wHZkdwx3sHhYeQB5q" + + "HpQevh7pHxMfPh9pH5Qfvx/qIBUgQSBsIJggxCDwIRwhSCF1IaEhziH7IiciVSKCIq8i3SMK" + 
+ "IzgjZiOUI8Ij8CQfJE0kfCSrJNolCSU4JWgllyXHJfcmJyZXJocmtyboJxgnSSd6J6sn3CgN" + + "KD8ocSiiKNQpBik4KWspnSnQKgIqNSpoKpsqzysCKzYraSudK9EsBSw5LG4soizXLQwtQS12" + + "Last4S4WLkwugi63Lu4vJC9aL5Evxy/+MDUwbDCkMNsxEjFKMYIxujHyMioyYzKbMtQzDTNG" + + "M38zuDPxNCs0ZTSeNNg1EzVNNYc1wjX9Njc2cjauNuk3JDdgN5w31zgUOFA4jDjIOQU5Qjl/" + + "Obw5+To2OnQ6sjrvOy07azuqO+g8JzxlPKQ84z0iPWE9oT3gPiA+YD6gPuA/IT9hP6I/4kAj" + + "QGRApkDnQSlBakGsQe5CMEJyQrVC90M6Q31DwEQDREdEikTORRJFVUWaRd5GIkZnRqtG8Ec1" + + "R3tHwEgFSEtIkUjXSR1JY0mpSfBKN0p9SsRLDEtTS5pL4kwqTHJMuk0CTUpNk03cTiVObk63" + + "TwBPSU+TT91QJ1BxULtRBlFQUZtR5lIxUnxSx1MTU19TqlP2VEJUj1TbVShVdVXCVg9WXFap" + + "VvdXRFeSV+BYL1h9WMtZGllpWbhaB1pWWqZa9VtFW5Vb5Vw1XIZc1l0nXXhdyV4aXmxevV8P" + + "X2Ffs2AFYFdgqmD8YU9homH1YklinGLwY0Njl2PrZEBklGTpZT1lkmXnZj1mkmboZz1nk2fp" + + "aD9olmjsaUNpmmnxakhqn2r3a09rp2v/bFdsr20IbWBtuW4SbmtuxG8eb3hv0XArcIZw4HE6" + + "cZVx8HJLcqZzAXNdc7h0FHRwdMx1KHWFdeF2Pnabdvh3VnezeBF4bnjMeSp5iXnnekZ6pXsE" + + "e2N7wnwhfIF84X1BfaF+AX5ifsJ/I3+Ef+WAR4CogQqBa4HNgjCCkoL0g1eDuoQdhICE44VH" + + "hauGDoZyhteHO4efiASIaYjOiTOJmYn+imSKyoswi5aL/IxjjMqNMY2Yjf+OZo7OjzaPnpAG" + + "kG6Q1pE/kaiSEZJ6kuOTTZO2lCCUipT0lV+VyZY0lp+XCpd1l+CYTJi4mSSZkJn8mmia1ZtC" + + "m6+cHJyJnPedZJ3SnkCerp8dn4uf+qBpoNihR6G2oiailqMGo3aj5qRWpMelOKWpphqmi6b9" + + "p26n4KhSqMSpN6mpqhyqj6sCq3Wr6axcrNCtRK24ri2uoa8Wr4uwALB1sOqxYLHWskuywrM4" + + "s660JbSctRO1irYBtnm28Ldot+C4WbjRuUq5wro7urW7LrunvCG8m70VvY++Cr6Evv+/er/1" + + "wHDA7MFnwePCX8Lbw1jD1MRRxM7FS8XIxkbGw8dBx7/IPci8yTrJuco4yrfLNsu2zDXMtc01" + + "zbXONs62zzfPuNA50LrRPNG+0j/SwdNE08bUSdTL1U7V0dZV1tjXXNfg2GTY6Nls2fHadtr7" + + "24DcBdyK3RDdlt4c3qLfKd+v4DbgveFE4cziU+Lb42Pj6+Rz5PzlhOYN5pbnH+ep6DLovOlG" + + "6dDqW+rl63Dr++yG7RHtnO4o7rTvQO/M8Fjw5fFy8f/yjPMZ86f0NPTC9VD13vZt9vv3ivgZ" + + "+Kj5OPnH+lf65/t3/Af8mP0p/br+S/7c/23//2Rlc2MAAAAAAAAALklFQyA2MTk2Ni0yLTEg" + + "RGVmYXVsdCBSR0IgQ29sb3VyIFNwYWNlIC0gc1JHQgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + 
"AABYWVogAAAAAAAAYpkAALeFAAAY2lhZWiAAAAAAAAAAAABQAAAAAAAAbWVhcwAAAAAAAAAB" + + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACWFlaIAAAAAAAAAMWAAADMwAAAqRYWVogAAAAAAAA" + + "b6IAADj1AAADkHNpZyAAAAAAQ1JUIGRlc2MAAAAAAAAALVJlZmVyZW5jZSBWaWV3aW5nIENv" + + "bmRpdGlvbiBpbiBJRUMgNjE5NjYtMi0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABYWVog" + + "AAAAAAAA9tYAAQAAAADTLXRleHQAAAAAQ29weXJpZ2h0IEludGVybmF0aW9uYWwgQ29sb3Ig" + + "Q29uc29ydGl1bSwgMjAwOQAAc2YzMgAAAAAAAQxEAAAF3///8yYAAAeUAAD9j///+6H///2i" + + "AAAD2wAAwHX/2wBDAAUDBAQEAwUEBAQFBQUGBwwIBwcHBw8LCwkMEQ8SEhEPERETFhwXExQa" + + "FRERGCEYGh0dHx8fExciJCIeJBweHx7/2wBDAQUFBQcGBw4ICA4eFBEUHh4eHh4eHh4eHh4e" + + "Hh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh7/wAARCAAxADIDASIAAhEB" + + "AxEB/8QAHQAAAQQDAQEAAAAAAAAAAAAAAAUGBwgBAwQJAv/EADYQAAEDAwIEAgcGBwAAAAAA" + + "AAECAwQABREGIQcSEzFBUQgUIjJhgZEVQnFyobEWIzeFkrLx/8QAGQEBAAMBAQAAAAAAAAAA" + + "AAAABAECAwUG/8QAKREAAgEDAgQFBQAAAAAAAAAAAAECAxEhBBITMUGBBRQzscEiMlFhcf/a" + + "AAwDAQACEQMRAD8A23GcGQVdFS2BgPLSfdHiaZnEjWdtslhaehy0rcceCm2G0+1sd1DPbsae" + + "EvTlylyWnnG5MVbYw44hsHrIIIKVDwG/6VWTXaHJ2qJwiuuyWmXVNoUrJPKk4Hxoiozg1vTX" + + "YSqkJp7Gmd184namuAS03MSy2kJ91tKlE+ZJFK2iOMGu9OT/AFpq5IlNqQErZksJW2tIOcbA" + + "EfiDTHi2h1SA6GnNiAsFJwnPY58jQ7Floe6K0FByBvt3pEYJ/bgzluSyXh4N8WbLxEjLjttG" + + "33lhHO/DWrmCk9ittX3k589xnfzqRDXnroO+TtE8QbVdFKciuw5iA8CO7ROHEkeIKSa9CkLb" + + "dQl1lYW0sBSFA5CkncH6UiN+oeSszHyorNFSVOt1hooV/KQdj90VRdFmeZ4x6gtcpohaZLx5" + + "AAAoFfMPwGCk58Kvear3xq0tDsvFWzau6eIl05oM7yC1JPTV8M45f8aPX6N/z5XsJ0rW+wl6" + + "fYhyz9lyrVDCgA0oNykO4z2CwB7JPfFcz+kXXLq0hNjYmLIKOvIc5W2UeCUoAPN8zTtkQ7PZ" + + "bJ1oCGmQVJUrlABAGNzj4Ab/AIVmPqQLkSHYBDkVCeo4txPK2CfAKPjQZVat9sVj8noI0YW+" + + "p5RCPpC6RRbplrnwkIDzmGHEp2ClAeyf3H0q3mj0BrSVnaBJCILKdz5IAqAdfSbc65b7tqRa" + + "W7e1cI63EkcwS3zjm7fAmpI0nxo0LqPWTWk7C7NfdWFIjyBG5WF8iSSE5PMAAnYkAGmaW6ja" + + "T5YOP4go8S8VzySTRXzmilnNuKWaS9T2S36gtTtuuLCXWXB2I7HuD9QD8qUqwTUSgpKz5Exk" + + 
"4u6K9a0tU+yvvwFOuMpcOGHSkLHnjfYn/tN6FEU6EMTOmpCXAtTjrhUV/AA7AUn+m9qWYNV2" + + "SwxnXGmokcyiWyQS6okA5HkAfqaj7SOp4lyt5/iCZLPQbPUSl3AOPEgbkGiwpykttzqUta4L" + + "lkdfEWbF1A1PZVJS1aYLC+rI+6XMYAT54P67VF3D25XDTd4b1FBe9XkRN2XAMnON9j3GNsfG" + + "tl8v0nUjyYMVr1K0ML5m2UjHNjsVeZ8h4V1x4DK2Exjnp8u/L479hVnTUFh4DTq8WX7LFwPS" + + "V04qCwqXpy7iQWkl0NcpQF435Sd8ZziioOQEpQlKUAJAwBjsKKr5iRXgIvpWFdqKKaEKVemf" + + "/Vj+3M/7KqEo3vK/LRRR6XJ9/dm8+nb4HFC7R/yinDA9wfL9qKK01Hpopp/UOs0UUUAWf//Z" +) + +var ( + testGlobalApp = New(FB_TEST_APP_ID, FB_TEST_APP_SECRET) +) + +type AllTypes struct { + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Float32 float32 + Float64 float64 + String string + ArrayOfInt []int + MapOfString map[string]string + NestedStruct *NestedStruct +} + +type NestedStruct struct { + Int int + String string + ArrayOfString []string +} + +type ParamsStruct struct { + Foo string + Bar *ParamsNestedStruct +} + +type ParamsNestedStruct struct { + AAA int + BBB string + CCC bool +} + +type FieldTagStruct struct { + Field1 string `facebook:"field2"` + Required string `facebook:",required"` + Foo string `facebook:"bar,required"` + CanAbsent string +} + +type MessageTag struct { + Id string + Name string + Type string +} + +type MessageTags map[string][]*MessageTag + +type NullStruct struct { + Null *int +} + +func TestApiGetUserInfoV2(t *testing.T) { + Version = "v2.2" + defer func() { + Version = "" + }() + + // It's not allowed to get user info by name. So I get "me" with access token instead. + if FB_TEST_VALID_ACCESS_TOKEN != "" { + me, err := Api("me", GET, Params{ + "access_token": FB_TEST_VALID_ACCESS_TOKEN, + }) + + if err != nil { + t.Fatalf("cannot get my info. [e:%v]", err) + } + + if e := me.Err(); e != nil { + t.Fatalf("facebook returns error. [e:%v]", e) + } + + t.Logf("my info. 
%v", me) + } +} + +func TestBatchApiGetInfo(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("cannot call batch api without access token. skip this test.") + } + + verifyBatchResult := func(t *testing.T, index int, res Result) { + batch, err := res.Batch() + + if err != nil { + t.Fatalf("cannot parse batch api results[%v]. [e:%v] [result:%v]", index, err, res) + } + + if batch.StatusCode != 200 { + t.Fatalf("facebook returns unexpected http status code in results[%v]. [code:%v] [result:%v]", index, batch.StatusCode, res) + } + + contentType := batch.Header.Get("Content-Type") + + if contentType == "" { + t.Fatalf("facebook returns unexpected http header in results[%v]. [header:%v]", index, batch.Header) + } + + if batch.Body == "" { + t.Fatalf("facebook returns unexpected http body in results[%v]. [body:%v]", index, batch.Body) + } + + var id string + err = batch.Result.DecodeField("id", &id) + + if err != nil { + t.Fatalf("cannot get 'id' field in results[%v]. [result:%v]", index, res) + } + + if id == "" { + t.Fatalf("facebook should return account id in results[%v].", index) + } + } + + test := func(t *testing.T) { + params1 := Params{ + "method": GET, + "relative_url": "me", + } + params2 := Params{ + "method": GET, + "relative_url": uint64(100002828925788), // id of my another facebook account + } + + results, err := BatchApi(FB_TEST_VALID_ACCESS_TOKEN, params1, params2) + + if err != nil { + t.Fatalf("cannot get batch result. [e:%v]", err) + } + + if len(results) != 2 { + t.Fatalf("batch api should return results in an array with 2 entries. [len:%v]", len(results)) + } + + if Version == "" { + t.Log("use default facebook version.") + } else { + t.Logf("global facebook version: %v", Version) + } + + for index, result := range results { + verifyBatchResult(t, index, result) + } + } + + // Use default Version. + Version = "" + test(t) + + // User "v2.2". 
+ Version = "v2.2" + defer func() { + Version = "" + }() + test(t) + + // when providing an invalid access token, BatchApi should return a facebook error. + _, err := BatchApi("an_invalid_access_token", Params{ + "method": GET, + "relative_url": "me", + }) + + if err == nil { + t.Fatalf("expect an error when providing an invalid access token to BatchApi.") + } + + if _, ok := err.(*Error); !ok { + t.Fatalf("batch result error must be an *Error. [e:%v]", err) + } +} + +func TestApiParseSignedRequest(t *testing.T) { + if FB_TEST_VALID_SIGNED_REQUEST == "" { + t.Logf("skip this case as we don't have a valid signed request.") + return + } + + app := New(FB_TEST_APP_ID, FB_TEST_APP_SECRET) + res, err := app.ParseSignedRequest(FB_TEST_VALID_SIGNED_REQUEST) + + if err != nil { + t.Fatalf("cannot parse signed request. [e:%v]", err) + } + + t.Logf("signed request is '%v'.", res) +} + +func TestSession(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("skip this case as we don't have a valid access token.") + } + + session := &Session{} + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + + test := func(t *testing.T, session *Session) { + id, err := session.User() + + if err != nil { + t.Fatalf("cannot get current user id. [e:%v]", err) + } + + t.Logf("current user id is %v", id) + + result, e := session.Api("/me", GET, Params{ + "fields": "id,email,website", + }) + + if e != nil { + t.Fatalf("cannot get my extended info. [e:%v]", e) + } + + if Version == "" { + t.Log("use default facebook version.") + } else { + t.Logf("global facebook version: %v", Version) + } + + if session.Version == "" { + t.Log("use default session facebook version.") + } else { + t.Logf("session facebook version: %v", session.Version) + } + + t.Logf("my extended info is: %v", result) + } + + // Default version. + test(t, session) + + // Global version overwrite default session version. 
+ func() { + Version = "v2.2" + defer func() { + Version = "" + }() + + test(t, session) + }() + + // Session version overwrite default version. + func() { + Version = "vx.y" // an invalid version. + session.Version = "v2.2" + defer func() { + Version = "" + }() + + test(t, session) + }() + + // Session with appsecret proof enabled. + if FB_TEST_VALID_ACCESS_TOKEN != "" { + app := New(FB_TEST_APP_ID, FB_TEST_APP_SECRET) + app.EnableAppsecretProof = true + session := app.Session(FB_TEST_VALID_ACCESS_TOKEN) + + _, e := session.Api("/me", GET, Params{ + "fields": "id", + }) + + if e != nil { + t.Fatalf("cannot get my info with proof. [e:%v]", e) + } + } +} + +func TestUploadingBinary(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("skip this case as we don't have a valid access token.") + } + + buf := bytes.NewBufferString(FB_TEST_BINARY_JPG_FILE) + reader := base64.NewDecoder(base64.StdEncoding, buf) + + session := &Session{} + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + + result, e := session.Api("/me/photos", POST, Params{ + "message": "Test photo from https://github.com/huandu/facebook", + "source": Data("my_profile.jpg", reader), + }) + + if e != nil { + t.Fatalf("cannot create photo on my timeline. [e:%v]", e) + } + + var id string + e = result.DecodeField("id", &id) + + if e != nil { + t.Fatalf("facebook should return photo id on success. [e:%v]", e) + } + + t.Logf("newly created photo id is %v", id) +} + +func TestUploadBinaryWithBatch(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("skip this case as we don't have a valid access token.") + } + + buf1 := bytes.NewBufferString(FB_TEST_BINARY_JPG_FILE) + reader1 := base64.NewDecoder(base64.StdEncoding, buf1) + buf2 := bytes.NewBufferString(FB_TEST_BINARY_JPG_FILE) + reader2 := base64.NewDecoder(base64.StdEncoding, buf2) + + session := &Session{} + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + + // sample comes from facebook batch api sample. 
+ // https://developers.facebook.com/docs/reference/api/batch/ + // + // curl + // -F 'access_token=…' \ + // -F 'batch=[{"method":"POST","relative_url":"me/photos","body":"message=My cat photo","attached_files":"file1"},{"method":"POST","relative_url":"me/photos","body":"message=My dog photo","attached_files":"file2"},]' \ + // -F 'file1=@cat.gif' \ + // -F 'file2=@dog.jpg' \ + // https://graph.facebook.com + result, e := session.Batch(Params{ + "file1": Data("cat.jpg", reader1), + "file2": Data("dog.jpg", reader2), + }, Params{ + "method": POST, + "relative_url": "me/photos", + "body": "message=My cat photo", + "attached_files": "file1", + }, Params{ + "method": POST, + "relative_url": "me/photos", + "body": "message=My dog photo", + "attached_files": "file2", + }) + + if e != nil { + t.Fatalf("cannot create photo on my timeline. [e:%v]", e) + } + + t.Logf("batch call result. [result:%v]", result) +} + +func TestSimpleFQL(t *testing.T) { + defer func() { + Version = "" + }() + + test := func(t *testing.T, session *Session) { + me, err := session.FQL("SELECT name FROM user WHERE uid = 538744468") + + if err != nil { + t.Fatalf("cannot get my info. [e:%v]", err) + } + + if len(me) != 1 { + t.Fatalf("expect to get only 1 result. [len:%v]", len(me)) + } + + t.Logf("my name. %v", me[0]["name"]) + } + + // v2.2 api doesn't allow me to query user without access token. + if FB_TEST_VALID_ACCESS_TOKEN == "" { + return + } + + Version = "v2.2" + session := &Session{} + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + test(t, session) +} + +func TestMultiFQL(t *testing.T) { + defer func() { + Version = "" + }() + + test := func(t *testing.T, session *Session) { + res, err := session.MultiFQL(Params{ + "query1": "SELECT username FROM page WHERE page_id = 20531316728", + "query2": "SELECT uid FROM user WHERE uid = 538744468", + }) + + if err != nil { + t.Fatalf("cannot get my info. 
[e:%v]", err) + } + + if err = res.Err(); err != nil { + t.Fatalf("fail to parse facebook api error. [e:%v]", err) + } + + var query1, query2 []Result + + err = res.DecodeField("query1", &query1) + + if err != nil { + t.Fatalf("cannot get result of query1. [e:%v]", err) + } + + if len(query1) != 1 { + t.Fatalf("expect to get only 1 result in query1. [len:%v]", len(query1)) + } + + err = res.DecodeField("query2", &query2) + + if err != nil { + t.Fatalf("cannot get result of query2. [e:%v]", err) + } + + if len(query2) != 1 { + t.Fatalf("expect to get only 1 result in query2. [len:%v]", len(query2)) + } + + var username string + var uid string + + err = query1[0].DecodeField("username", &username) + + if err != nil { + t.Fatalf("cannot decode username from query1. [e:%v]", err) + } + + if username != "facebook" { + t.Fatalf("username is expected to be 'facebook'. [username:%v]", username) + } + + err = query2[0].DecodeField("uid", &uid) + + if err != nil { + t.Fatalf("cannot decode username from query2. [e:%v] [query2:%v]", err, query2) + } + + if uid != "538744468" { + t.Fatalf("username is expected to be 'facebook'. [username:%v]", username) + } + } + + // v2.2 api doesn't allow me to query user without access token. + if FB_TEST_VALID_ACCESS_TOKEN == "" { + return + } + + Version = "v2.2" + session := &Session{} + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + test(t, session) +} + +func TestGraphDebuggingAPI(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("cannot call batch api without access token. skip this test.") + } + + test := func(t *testing.T, session *Session) { + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + defer session.SetAccessToken("") + + // test app must not grant "read_friendlists" permission. + // otherwise there is no way to get a warning from facebook. 
+ res, _ := session.Get("/me/friendlists", nil) + + if res == nil { + t.Fatalf("res must not be nil.") + } + + debugInfo := res.DebugInfo() + + if debugInfo == nil { + t.Fatalf("debug info must exist.") + } + + t.Logf("facebook response is: %v", res) + t.Logf("debug info is: %v", *debugInfo) + + if debugInfo.Messages == nil && len(debugInfo.Messages) > 0 { + t.Fatalf("facebook must warn me for the permission issue.") + } + + msg := debugInfo.Messages[0] + + if msg.Type == "" || msg.Message == "" { + t.Fatalf("facebook must say something. [msg:%v]", msg) + } + + if debugInfo.FacebookApiVersion == "" { + t.Fatalf("facebook must tell me api version.") + } + + if debugInfo.FacebookDebug == "" { + t.Fatalf("facebook must tell me X-FB-Debug.") + } + + if debugInfo.FacebookRev == "" { + t.Fatalf("facebook must tell me x-fb-rev.") + } + } + + defer func() { + Debug = DEBUG_OFF + Version = "" + }() + + Version = "v2.2" + Debug = DEBUG_ALL + test(t, defaultSession) + session := &Session{} + session.SetDebug(DEBUG_ALL) + test(t, session) + + // test changing debug mode. + old := session.SetDebug(DEBUG_OFF) + + if old != DEBUG_ALL { + t.Fatalf("debug mode must be DEBUG_ALL. [debug:%v]", old) + } + + if session.Debug() != DEBUG_ALL { + t.Fatalf("debug mode must be DEBUG_ALL [debug:%v]", session.Debug()) + } + + Debug = DEBUG_OFF + + if session.Debug() != DEBUG_OFF { + t.Fatalf("debug mode must be DEBUG_OFF. 
[debug:%v]", session.Debug()) + } +} + +func TestResultDecode(t *testing.T) { + strNormal := `{ + "int": 1234, + "int8": 23, + "int16": 12345, + "int32": -127372843, + "int64": 192438483489298, + "uint": 1283829, + "uint8": 233, + "uint16": 62121, + "uint32": 3083747392, + "uint64": 2034857382993849, + "float32": 9382.38429, + "float64": 3984.293848292, + "map_of_string": {"a": "1", "b": "2"}, + "array_of_int": [12, 34, 56], + "string": "abcd", + "notused": 1234, + "nested_struct": { + "string": "hello", + "int": 123, + "array_of_string": ["a", "b", "c"] + } + }` + strOverflow := `{ + "int": 1234, + "int8": 23, + "int16": 12345, + "int32": -127372843, + "int64": 192438483489298, + "uint": 1283829, + "uint8": 233, + "uint16": 62121, + "uint32": 383083747392, + "uint64": 2034857382993849, + "float32": 9382.38429, + "float64": 3984.293848292, + "string": "abcd", + "map_of_string": {"a": "1", "b": "2"}, + "array_of_int": [12, 34, 56], + "string": "abcd", + "notused": 1234, + "nested_struct": { + "string": "hello", + "int": 123, + "array_of_string": ["a", "b", "c"] + } + }` + strMissAField := `{ + "int": 1234, + "int8": 23, + "int16": 12345, + "int32": -127372843, + + "missed": "int64", + + "uint": 1283829, + "uint8": 233, + "uint16": 62121, + "uint32": 383083747392, + "uint64": 2034857382993849, + "float32": 9382.38429, + "float64": 3984.293848292, + "string": "abcd", + "map_of_string": {"a": "1", "b": "2"}, + "array_of_int": [12, 34, 56], + "string": "abcd", + "notused": 1234, + "nested_struct": { + "string": "hello", + "int": 123, + "array_of_string": ["a", "b", "c"] + } + }` + var result Result + var err error + var normal, withError AllTypes + var anInt int + + err = json.Unmarshal([]byte(strNormal), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. [e:%v]", err) + } + + err = result.Decode(&normal) + + if err != nil { + t.Fatalf("cannot decode normal struct. 
[e:%v]", err) + } + + err = json.Unmarshal([]byte(strOverflow), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. [e:%v]", err) + } + + err = result.Decode(&withError) + + if err == nil { + t.Fatalf("struct should be overflow") + } + + t.Logf("overflow struct. e:%v", err) + + err = json.Unmarshal([]byte(strMissAField), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. [e:%v]", err) + } + + err = result.Decode(&withError) + + if err == nil { + t.Fatalf("a field in struct should absent in json map.") + } + + t.Logf("miss-a-field struct. e:%v", err) + + err = result.DecodeField("array_of_int.2", &anInt) + + if err != nil { + t.Fatalf("cannot decode array item. [e:%v]", err) + } + + if anInt != 56 { + t.Fatalf("invalid array value. expected 56, actual %v", anInt) + } + + err = result.DecodeField("nested_struct.int", &anInt) + + if err != nil { + t.Fatalf("cannot decode nested struct item. [e:%v]", err) + } + + if anInt != 123 { + t.Fatalf("invalid array value. expected 123, actual %v", anInt) + } +} + +func TestParamsEncode(t *testing.T) { + var params Params + buf := &bytes.Buffer{} + + if mime, err := params.Encode(buf); err != nil || mime != _MIME_FORM_URLENCODED || buf.Len() != 0 { + t.Fatalf("empty params must encode to an empty string. actual is [e:%v] [str:%v] [mime:%v]", err, buf.String(), mime) + } + + buf.Reset() + params = Params{} + params["need_escape"] = "&=+" + expectedEncoding := "need_escape=%26%3D%2B" + + if mime, err := params.Encode(buf); err != nil || mime != _MIME_FORM_URLENCODED || buf.String() != expectedEncoding { + t.Fatalf("wrong params encode result. expected is '%v'. actual is '%v'. [e:%v] [mime:%v]", expectedEncoding, buf.String(), err, mime) + } + + buf.Reset() + data := ParamsStruct{ + Foo: "hello, world!", + Bar: &ParamsNestedStruct{ + AAA: 1234, + BBB: "bbb", + CCC: true, + }, + } + params = MakeParams(data) + /* there is no easy way to compare two encoded maps. 
so i just write expect map here, not test it. + expectedParams := Params{ + "foo": "hello, world!", + "bar": map[string]interface{}{ + "aaa": 1234, + "bbb": "bbb", + "ccc": true, + }, + } + */ + + if params == nil { + t.Fatalf("make params error.") + } + + mime, err := params.Encode(buf) + t.Logf("complex encode result is '%v'. [e:%v] [mime:%v]", buf.String(), err, mime) +} + +func TestStructFieldTag(t *testing.T) { + strNormalField := `{ + "field2": "hey", + "required": "my", + "bar": "dear" + }` + strMissingField2Field := `{ + "field1": "hey", + "required": "my", + "bar": "dear" + }` + strMissingRequiredField := `{ + "field1": "hey", + "bar": "dear", + "can_absent": "babe" + }` + strMissingBarField := `{ + "field1": "hey", + "required": "my" + }` + + var result Result + var value FieldTagStruct + var err error + + err = json.Unmarshal([]byte(strNormalField), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. [e:%v]", err) + } + + err = result.Decode(&value) + + if err != nil { + t.Fatalf("cannot decode struct. [e:%v]", err) + } + + result = Result{} + value = FieldTagStruct{} + err = json.Unmarshal([]byte(strMissingField2Field), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. [e:%v]", err) + } + + err = result.Decode(&value) + + if err != nil { + t.Fatalf("cannot decode struct. [e:%v]", err) + } + + if value.Field1 != "" { + t.Fatalf("value field1 should be kept unchanged. [field1:%v]", value.Field1) + } + + result = Result{} + value = FieldTagStruct{} + err = json.Unmarshal([]byte(strMissingRequiredField), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. [e:%v]", err) + } + + err = result.Decode(&value) + + if err == nil { + t.Fatalf("should fail to decode struct.") + } + + t.Logf("expected decode error. [e:%v]", err) + + result = Result{} + value = FieldTagStruct{} + err = json.Unmarshal([]byte(strMissingBarField), &result) + + if err != nil { + t.Fatalf("cannot unmarshal json string. 
[e:%v]", err) + } + + err = result.Decode(&value) + + if err == nil { + t.Fatalf("should fail to decode struct.") + } + + t.Logf("expected decode error. [e:%v]", err) +} + +type myTime time.Time + +func TestDecodeField(t *testing.T) { + jsonStr := `{ + "int": 1234, + "array": ["abcd", "efgh"], + "map": { + "key1": 5678, + "nested_map": { + "key2": "ijkl", + "key3": [{ + "key4": "mnop" + }, { + "key5": 9012 + }] + } + }, + "message_tags": { + "2": [ + { + "id": "4838901", + "name": "Foo Bar", + "type": "page" + }, + { + "id": "293450302", + "name": "Player Rocks", + "type": "page" + } + ] + }, + "nullStruct": { + "null": null + }, + "timestamp": "2015-01-03T11:15:01+0000", + "custom_timestamp": "2014-03-04T11:15:01+0000" + }` + + var result Result + var err error + var anInt int + var aString string + var aSlice []string + var subResults []Result + var aNull NullStruct = NullStruct{ + Null: &anInt, + } + var aTimestamp time.Time + var aCustomTimestamp myTime + + err = json.Unmarshal([]byte(jsonStr), &result) + + if err != nil { + t.Fatalf("invalid json string. [e:%v]", err) + } + + err = result.DecodeField("int", &anInt) + + if err != nil { + t.Fatalf("cannot decode int field. [e:%v]", err) + } + + if anInt != 1234 { + t.Fatalf("expected int value is 1234. [int:%v]", anInt) + } + + err = result.DecodeField("array.0", &aString) + + if err != nil { + t.Fatalf("cannot decode array.0 field. [e:%v]", err) + } + + if aString != "abcd" { + t.Fatalf("expected array.0 value is 'abcd'. [string:%v]", aString) + } + + err = result.DecodeField("array.1", &aString) + + if err != nil { + t.Fatalf("cannot decode array.1 field. [e:%v]", err) + } + + if aString != "efgh" { + t.Fatalf("expected array.1 value is 'abcd'. [string:%v]", aString) + } + + err = result.DecodeField("array.2", &aString) + + if err == nil { + t.Fatalf("array.2 doesn't exist. expect an error.") + } + + err = result.DecodeField("map.key1", &anInt) + + if err != nil { + t.Fatalf("cannot decode map.key1 field. 
[e:%v]", err) + } + + if anInt != 5678 { + t.Fatalf("expected map.key1 value is 5678. [int:%v]", anInt) + } + + err = result.DecodeField("map.nested_map.key2", &aString) + + if err != nil { + t.Fatalf("cannot decode map.nested_map.key2 field. [e:%v]", err) + } + + if aString != "ijkl" { + t.Fatalf("expected map.nested_map.key2 value is 'ijkl'. [string:%v]", aString) + } + + err = result.DecodeField("array", &aSlice) + + if err != nil { + t.Fatalf("cannot decode array field. [e:%v]", err) + } + + if len(aSlice) != 2 || aSlice[0] != "abcd" || aSlice[1] != "efgh" { + t.Fatalf("expected array value is ['abcd', 'efgh']. [slice:%v]", aSlice) + } + + err = result.DecodeField("map.nested_map.key3", &subResults) + + if err != nil { + t.Fatalf("cannot decode map.nested_map.key3 field. [e:%v]", err) + } + + if len(subResults) != 2 { + t.Fatalf("expected sub results len is 2. [len:%v] [results:%v]", subResults) + } + + err = subResults[0].DecodeField("key4", &aString) + + if err != nil { + t.Fatalf("cannot decode key4 field in sub result. [e:%v]", err) + } + + if aString != "mnop" { + t.Fatalf("expected map.nested_map.key2 value is 'mnop'. [string:%v]", aString) + } + + err = subResults[1].DecodeField("key5", &anInt) + + if err != nil { + t.Fatalf("cannot decode key5 field. [e:%v]", err) + } + + if anInt != 9012 { + t.Fatalf("expected key5 value is 9012. [int:%v]", anInt) + } + + err = result.DecodeField("message_tags.2.0.id", &aString) + + if err != nil { + t.Fatalf("cannot decode message_tags.2.0.id field. [e:%v]", err) + } + + if aString != "4838901" { + t.Fatalf("expected message_tags.2.0.id value is '4838901'. [string:%v]", aString) + } + + var messageTags MessageTags + err = result.DecodeField("message_tags", &messageTags) + + if err != nil { + t.Fatalf("cannot decode message_tags field. [e:%v]", err) + } + + if len(messageTags) != 1 { + t.Fatalf("expect messageTags have only 1 element. 
[len:%v]", len(messageTags)) + } + + aString = messageTags["2"][1].Id + + if aString != "293450302" { + t.Fatalf("expect messageTags.2.1.id value is '293450302'. [value:%v]", aString) + } + + err = result.DecodeField("nullStruct", &aNull) + + if err != nil { + t.Fatalf("cannot decode nullStruct field. [e:%v]", err) + } + + if aNull.Null != nil { + t.Fatalf("expect aNull.Null is reset to nil.") + } + + err = result.DecodeField("timestamp", &aTimestamp) + + if err != nil { + t.Fatalf("cannot decode timestamp field. [e:%v]", err) + } + + if !aTimestamp.Equal(time.Date(2015, time.January, 3, 11, 15, 1, 0, time.FixedZone("no-offset", 0))) { + t.Fatalf("expect aTimestamp date to be 2015-01-03 11:15:01 +0000 [value:%v]", aTimestamp.String()) + } + + err = result.DecodeField("custom_timestamp", &aCustomTimestamp) + + if err != nil { + t.Fatalf("cannot decode custom_timestamp field. [e:%v]", err) + } + + if !time.Time(aCustomTimestamp).Equal(time.Date(2014, time.March, 4, 11, 15, 1, 0, time.FixedZone("no-offset", 0))) { + t.Fatalf("expect aCustomTimestamp date to be 2014-03-04 11:15:01 +0000 [value:%v]", time.Time(aCustomTimestamp).String()) + } +} + +func TestGraphError(t *testing.T) { + res, err := Get("/me", Params{ + "access_token": "fake", + }) + + if err == nil { + t.Fatalf("facebook should return error for bad access token. [res:%v]", res) + } + + fbErr, ok := err.(*Error) + + if !ok { + t.Fatalf("error must be a *Error. [e:%v]", err) + } + + t.Logf("facebook error. 
[e:%v] [message:%v] [type:%v] [code:%v] [subcode:%v]", err, fbErr.Message, fbErr.Type, fbErr.Code, fbErr.ErrorSubcode) +} + +type FacebookFriend struct { + Id string `facebook:",required"` + Name string `facebook:",required"` +} + +type FacebookFriends struct { + Friends []FacebookFriend `facebook:"data,required"` +} + +func TestPagingResultDecode(t *testing.T) { + res := Result{ + "data": []interface{}{ + map[string]interface{}{ + "name": "friend 1", + "id": "1", + }, + map[string]interface{}{ + "name": "friend 2", + "id": "2", + }, + }, + "paging": map[string]interface{}{ + "next": "https://graph.facebook.com/...", + }, + } + paging, err := newPagingResult(nil, res) + if err != nil { + t.Fatalf("cannot create paging result. [e:%v]", err) + } + var friends FacebookFriends + if err := paging.Decode(&friends); err != nil { + t.Fatalf("cannot decode paging result. [e:%v]", err) + } + if len(friends.Friends) != 2 { + t.Fatalf("expect to have 2 friends. [len:%v]", len(friends.Friends)) + } + if friends.Friends[0].Name != "friend 1" { + t.Fatalf("expect name to be 'friend 1'. [name:%v]", friends.Friends[0].Name) + } + if friends.Friends[0].Id != "1" { + t.Fatalf("expect id to be '1'. [id:%v]", friends.Friends[0].Id) + } + if friends.Friends[1].Name != "friend 2" { + t.Fatalf("expect name to be 'friend 2'. [name:%v]", friends.Friends[1].Name) + } + if friends.Friends[1].Id != "2" { + t.Fatalf("expect id to be '2'. [id:%v]", friends.Friends[1].Id) + } +} + +func TestPagingResult(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("skip this case as we don't have a valid access token.") + } + + session := &Session{} + session.SetAccessToken(FB_TEST_VALID_ACCESS_TOKEN) + res, err := session.Get("/me/home", Params{ + "limit": 2, + }) + + if err != nil { + t.Fatalf("cannot get my home post. [e:%v]", err) + } + + paging, err := res.Paging(session) + + if err != nil { + t.Fatalf("cannot get paging information. 
[e:%v]", err) + } + + data := paging.Data() + + if len(data) != 2 { + t.Fatalf("expect to have only 2 post. [len:%v]", len(data)) + } + + t.Logf("result: %v", res) + t.Logf("previous: %v", paging.previous) + + noMore, err := paging.Previous() + + if err != nil { + t.Fatalf("cannot get paging information. [e:%v]", err) + } + + if !noMore { + t.Fatalf("should have no more post. %v", *paging.paging.Paging) + } + + noMore, err = paging.Next() + + if err != nil { + t.Fatalf("cannot get paging information. [e:%v]", err) + } + + data = paging.Data() + + if len(data) != 2 { + t.Fatalf("expect to have only 2 post. [len:%v]", len(data)) + } + + noMore, err = paging.Next() + + if err != nil { + t.Fatalf("cannot get paging information. [e:%v]", err) + } + + if len(paging.Data()) != 2 { + t.Fatalf("expect to have only 2 post. [len:%v]", len(paging.Data())) + } +} + +func TestDecodeLargeInteger(t *testing.T) { + bigIntegers := []int64{ + 1<<53 - 2, + 1<<53 - 1, + 1 << 53, + 1<<53 + 1, + 1<<53 + 2, + + 1<<54 - 2, + 1<<54 - 1, + 1 << 54, + 1<<54 + 1, + 1<<54 + 2, + + 1<<60 - 2, + 1<<60 - 1, + 1 << 60, + 1<<60 + 1, + 1<<60 + 2, + + 1<<63 - 2, + 1<<63 - 1, + + -(1<<53 - 2), + -(1<<53 - 1), + -(1 << 53), + -(1<<53 + 1), + -(1<<53 + 2), + + -(1<<54 - 2), + -(1<<54 - 1), + -(1 << 54), + -(1<<54 + 1), + -(1<<54 + 2), + + -(1<<60 - 2), + -(1<<60 - 1), + -(1 << 60), + -(1<<60 + 1), + -(1<<60 + 2), + + -(1<<53 - 2), + -(1<<63 - 1), + -(1 << 63), + } + jsonStr := `{ + "integers": [%v] + }` + + buf := &bytes.Buffer{} + + for _, v := range bigIntegers { + buf.WriteString(fmt.Sprintf("%v", v)) + buf.WriteRune(',') + } + + buf.WriteRune('0') + json := fmt.Sprintf(jsonStr, buf.String()) + + res, err := MakeResult([]byte(json)) + + if err != nil { + t.Fatalf("cannot make result on test json string. [e:%v]", err) + } + + var actualIntegers []int64 + err = res.DecodeField("integers", &actualIntegers) + + if err != nil { + t.Fatalf("cannot decode integers from json. 
[e:%v]", err) + } + + if len(actualIntegers) != len(bigIntegers)+1 { + t.Fatalf("count of decoded integers is not correct. [expected:%v] [actual:%v]", len(bigIntegers)+1, len(actualIntegers)) + } + + for k, _ := range bigIntegers { + if bigIntegers[k] != actualIntegers[k] { + t.Logf("expected integers: %v", bigIntegers) + t.Logf("actual integers: %v", actualIntegers) + t.Fatalf("a decoded integer is not expected. [expected:%v] [actual:%v]", bigIntegers[k], actualIntegers[k]) + } + } +} + +func TestInspectValidToken(t *testing.T) { + if FB_TEST_VALID_ACCESS_TOKEN == "" { + t.Skipf("skip this case as we don't have a valid access token.") + } + + session := testGlobalApp.Session(FB_TEST_VALID_ACCESS_TOKEN) + result, err := session.Inspect() + + if err != nil { + t.Fatalf("cannot inspect a valid access token. [e:%v]", err) + } + + var isValid bool + err = result.DecodeField("is_valid", &isValid) + + if err != nil { + t.Fatalf("cannot get 'is_valid' in inspect result. [e:%v]", err) + } + + if !isValid { + t.Fatalf("inspect result shows access token is invalid. why? [result:%v]", result) + } +} + +func TestInspectInvalidToken(t *testing.T) { + invalidToken := "CAACZA38ZAD8CoBAe2bDC6EdThnni3b56scyshKINjZARoC9ZAuEUTgYUkYnKdimqfA2ZAXcd2wLd7Rr8jLmMXTY9vqAhQGqObZBIUz1WwbqVoCsB3AAvLtwoWNhsxM76mK0eiJSLXHZCdPVpyhmtojvzXA7f69Bm6b5WZBBXia8iOpPZAUHTGp1UQLFMt47c7RqJTrYIl3VfAR0deN82GMFL2" + session := testGlobalApp.Session(invalidToken) + result, err := session.Inspect() + + if err == nil { + t.Fatalf("facebook should indicate it's an invalid token. why not? [result:%v]", result) + } + + if _, ok := err.(*Error); !ok { + t.Fatalf("inspect error should be a standard facebook error. why not? [e:%v]", err) + } + + isValid := true + err = result.DecodeField("is_valid", &isValid) + + if err != nil { + t.Fatalf("cannot get 'is_valid' in inspect result. [e:%v]", err) + } + + if isValid { + t.Fatalf("inspect result shows access token is valid. why? 
[result:%v]", result) + } +} + +func TestCamelCaseToUnderScore(t *testing.T) { + cases := map[string]string{ + "TestCase": "test_case", + "HTTPServer": "http_server", + "NoHTTPS": "no_https", + "Wi_thF": "wi_th_f", + "_AnotherTES_TCaseP": "_another_tes_t_case_p", + "ALL": "all", + "UserID": "user_id", + } + + for k, v := range cases { + str := camelCaseToUnderScore(k) + + if str != v { + t.Fatalf("wrong underscore string. [expect:%v] [actual:%v]", v, str) + } + } +} + +func TestMakeSliceResult(t *testing.T) { + jsonStr := `{ + "error": { + "message": "Invalid OAuth access token.", + "type": "OAuthException", + "code": 190 + } + }` + var res []Result + err := makeResult([]byte(jsonStr), &res) + + if err == nil { + t.Fatalf("makeResult must fail") + } + + fbErr, ok := err.(*Error) + + if !ok { + t.Fatalf("error must be a facebook error. [e:%v]", err) + } + + if fbErr.Code != 190 { + t.Fatalf("invalid facebook error. [e:%v]", fbErr.Error()) + } +} + +func TestMakeSliceResultWithNilElements(t *testing.T) { + jsonStr := `[ + null, + { + "foo": "bar" + }, + null + ]` + var res []Result + err := makeResult([]byte(jsonStr), &res) + + if err != nil { + t.Fatalf("fail to decode results. [e:%v]", err) + } + + if len(res) != 3 { + t.Fatalf("expect 3 elements in res. [res:%v]", res) + } + + if res[0] != nil || res[1] == nil || res[2] != nil { + t.Fatalf("decoded res is not expected. [res:%v]", res) + } + + if res[1]["foo"].(string) != "bar" { + t.Fatalf("decode res is not expected. [res:%v]", res) + } +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/misc.go b/Godeps/_workspace/src/github.com/huandu/facebook/misc.go new file mode 100644 index 000000000..cdf7a9577 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/misc.go @@ -0,0 +1,131 @@ +// A facebook graph api client in go. 
+// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "bytes" + "io" + "unicode" + "unicode/utf8" +) + +func camelCaseToUnderScore(str string) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var prev, r0, r1 rune + var size int + + r0 = '_' + + for len(str) > 0 { + prev = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + switch { + case r0 == utf8.RuneError: + buf.WriteByte(byte(str[0])) + + case unicode.IsUpper(r0): + if prev != '_' { + buf.WriteRune('_') + } + + buf.WriteRune(unicode.ToLower(r0)) + + if len(str) == 0 { + break + } + + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !unicode.IsUpper(r0) { + buf.WriteRune(r0) + break + } + + // find next non-upper-case character and insert `_` properly. + // it's designed to convert `HTTPServer` to `http_server`. + // if there are more than 2 adjacent upper case characters in a word, + // treat them as an abbreviation plus a normal word. + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 == utf8.RuneError { + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteByte(byte(str[0])) + break + } + + if !unicode.IsUpper(r0) { + if r0 == '_' || r0 == ' ' || r0 == '-' { + r0 = '_' + + buf.WriteRune(unicode.ToLower(r1)) + } else { + buf.WriteRune('_') + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(r0) + } + + break + } + + buf.WriteRune(unicode.ToLower(r1)) + } + + if len(str) == 0 || r0 == '_' { + buf.WriteRune(unicode.ToLower(r0)) + break + } + + default: + if r0 == ' ' || r0 == '-' { + r0 = '_' + } + + buf.WriteRune(r0) + } + } + + return buf.String() +} + +// Returns error string. +func (e *Error) Error() string { + return e.Message +} + +// Creates a new binary data holder. 
+func Data(filename string, source io.Reader) *binaryData { + return &binaryData{ + Filename: filename, + Source: source, + } +} + +// Creates a binary file holder. +func File(filename, path string) *binaryFile { + return &binaryFile{ + Filename: filename, + } +} + +// Creates a binary file holder and specific a different path for reading. +func FileAlias(filename, path string) *binaryFile { + return &binaryFile{ + Filename: filename, + Path: path, + } +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/paging_result.go b/Godeps/_workspace/src/github.com/huandu/facebook/paging_result.go new file mode 100644 index 000000000..f1eb9b7f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/paging_result.go @@ -0,0 +1,146 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "bytes" + "fmt" + "net/http" +) + +type pagingData struct { + Data []Result `facebook:",required"` + Paging *pagingNavigator +} + +type pagingNavigator struct { + Previous string + Next string +} + +func newPagingResult(session *Session, res Result) (*PagingResult, error) { + // quick check whether Result is a paging response. + if _, ok := res["data"]; !ok { + return nil, fmt.Errorf("current Result is not a paging response.") + } + + pr := &PagingResult{ + session: session, + } + paging := &pr.paging + err := res.Decode(paging) + + if err != nil { + return nil, err + } + + if paging.Paging != nil { + pr.previous = paging.Paging.Previous + pr.next = paging.Paging.Next + } + + return pr, nil +} + +// Get current data. +func (pr *PagingResult) Data() []Result { + return pr.paging.Data +} + +// Decodes the current full result to a struct. See Result#Decode. 
+func (pr *PagingResult) Decode(v interface{}) (err error) { + res := Result{ + "data": pr.Data(), + } + return res.Decode(v) +} + +// Read previous page. +func (pr *PagingResult) Previous() (noMore bool, err error) { + if !pr.HasPrevious() { + noMore = true + return + } + + return pr.navigate(&pr.previous) +} + +// Read next page. +func (pr *PagingResult) Next() (noMore bool, err error) { + if !pr.HasNext() { + noMore = true + return + } + + return pr.navigate(&pr.next) +} + +// Check whether there is previous page. +func (pr *PagingResult) HasPrevious() bool { + return pr.previous != "" +} + +// Check whether there is next page. +func (pr *PagingResult) HasNext() bool { + return pr.next != "" +} + +func (pr *PagingResult) navigate(url *string) (noMore bool, err error) { + var pagingUrl string + + // add session information in paging url. + params := Params{} + pr.session.prepareParams(params) + + if len(params) == 0 { + pagingUrl = *url + } else { + buf := &bytes.Buffer{} + buf.WriteString(*url) + buf.WriteRune('&') + params.Encode(buf) + + pagingUrl = buf.String() + } + + var request *http.Request + var res Result + + request, err = http.NewRequest("GET", pagingUrl, nil) + + if err != nil { + return + } + + res, err = pr.session.Request(request) + + if err != nil { + return + } + + if pr.paging.Paging != nil { + pr.paging.Paging.Next = "" + pr.paging.Paging.Previous = "" + } + paging := &pr.paging + err = res.Decode(paging) + + if err != nil { + return + } + + if paging.Paging == nil || len(paging.Data) == 0 { + *url = "" + noMore = true + } else { + pr.previous = paging.Paging.Previous + pr.next = paging.Paging.Next + } + + return +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/params.go b/Godeps/_workspace/src/github.com/huandu/facebook/params.go new file mode 100644 index 000000000..bcce18a3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/params.go @@ -0,0 +1,227 @@ +// A facebook graph api client in go. 
+// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "encoding/json" + "io" + "mime/multipart" + "net/url" + "os" + "reflect" + "runtime" +) + +// Makes a new Params instance by given data. +// Data must be a struct or a map with string keys. +// MakeParams will change all struct field name to lower case name with underscore. +// e.g. "FooBar" will be changed to "foo_bar". +// +// Returns nil if data cannot be used to make a Params instance. +func MakeParams(data interface{}) (params Params) { + if p, ok := data.(Params); ok { + return p + } + + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + + params = nil + } + }() + + params = makeParams(reflect.ValueOf(data)) + return +} + +func makeParams(value reflect.Value) (params Params) { + for value.Kind() == reflect.Ptr || value.Kind() == reflect.Interface { + value = value.Elem() + } + + // only map with string keys can be converted to Params + if value.Kind() == reflect.Map && value.Type().Key().Kind() == reflect.String { + params = Params{} + + for _, key := range value.MapKeys() { + params[key.String()] = value.MapIndex(key).Interface() + } + + return + } + + if value.Kind() != reflect.Struct { + return + } + + params = Params{} + num := value.NumField() + + for i := 0; i < num; i++ { + name := camelCaseToUnderScore(value.Type().Field(i).Name) + field := value.Field(i) + + for field.Kind() == reflect.Ptr { + field = field.Elem() + } + + switch field.Kind() { + case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Invalid: + // these types won't be marshalled in json. + params = nil + return + + default: + params[name] = field.Interface() + } + } + + return +} + +// Encodes params to query string. +// If map value is not a string, Encode uses json.Marshal() to convert value to string. 
+// +// Encode will panic if Params contains values that cannot be marshalled to json string. +func (params Params) Encode(writer io.Writer) (mime string, err error) { + if params == nil || len(params) == 0 { + mime = _MIME_FORM_URLENCODED + return + } + + // check whether params contains any binary data. + hasBinary := false + + for _, v := range params { + typ := reflect.TypeOf(v) + + if typ == typeOfPointerToBinaryData || typ == typeOfPointerToBinaryFile { + hasBinary = true + break + } + } + + if hasBinary { + return params.encodeMultipartForm(writer) + } + + return params.encodeFormUrlEncoded(writer) +} + +func (params Params) encodeFormUrlEncoded(writer io.Writer) (mime string, err error) { + var jsonStr []byte + written := false + + for k, v := range params { + if written { + io.WriteString(writer, "&") + } + + io.WriteString(writer, url.QueryEscape(k)) + io.WriteString(writer, "=") + + if reflect.TypeOf(v).Kind() == reflect.String { + io.WriteString(writer, url.QueryEscape(reflect.ValueOf(v).String())) + } else { + jsonStr, err = json.Marshal(v) + + if err != nil { + return + } + + io.WriteString(writer, url.QueryEscape(string(jsonStr))) + } + + written = true + } + + mime = _MIME_FORM_URLENCODED + return +} + +func (params Params) encodeMultipartForm(writer io.Writer) (mime string, err error) { + w := multipart.NewWriter(writer) + defer func() { + w.Close() + mime = w.FormDataContentType() + }() + + for k, v := range params { + switch value := v.(type) { + case *binaryData: + var dst io.Writer + dst, err = w.CreateFormFile(k, value.Filename) + + if err != nil { + return + } + + _, err = io.Copy(dst, value.Source) + + if err != nil { + return + } + + case *binaryFile: + var dst io.Writer + var file *os.File + var path string + + dst, err = w.CreateFormFile(k, value.Filename) + + if err != nil { + return + } + + if value.Path == "" { + path = value.Filename + } else { + path = value.Path + } + + file, err = os.Open(path) + + if err != nil { + return + } + + 
_, err = io.Copy(dst, file) + + if err != nil { + return + } + + default: + var dst io.Writer + var jsonStr []byte + + dst, err = w.CreateFormField(k) + + if reflect.TypeOf(v).Kind() == reflect.String { + io.WriteString(dst, reflect.ValueOf(v).String()) + } else { + jsonStr, err = json.Marshal(v) + + if err != nil { + return + } + + _, err = dst.Write(jsonStr) + + if err != nil { + return + } + } + } + } + + return +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/result.go b/Godeps/_workspace/src/github.com/huandu/facebook/result.go new file mode 100644 index 000000000..fd760be4e --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/result.go @@ -0,0 +1,1097 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "reflect" + "runtime" + "strconv" + "strings" + "time" +) + +// MakeResult makes a Result from facebook Graph API response. +func MakeResult(jsonBytes []byte) (Result, error) { + res := Result{} + err := makeResult(jsonBytes, &res) + + if err != nil { + return nil, err + } + + // facebook may return an error + return res, res.Err() +} + +func makeResult(jsonBytes []byte, res interface{}) error { + if bytes.Equal(jsonBytes, facebookSuccessJsonBytes) { + return nil + } + + jsonReader := bytes.NewReader(jsonBytes) + dec := json.NewDecoder(jsonReader) + + // issue #19 + // app_scoped user_id in a post-Facebook graph 2.0 would exceeds 2^53. + // use Number instead of float64 to avoid precision lost. + dec.UseNumber() + + err := dec.Decode(res) + + if err != nil { + typ := reflect.TypeOf(res) + + if typ != nil { + // if res is a slice, jsonBytes may be a facebook error. + // try to decode it as Error. 
+ kind := typ.Kind() + + if kind == reflect.Ptr { + typ = typ.Elem() + kind = typ.Kind() + } + + if kind == reflect.Array || kind == reflect.Slice { + var errRes Result + err = makeResult(jsonBytes, &errRes) + + if err != nil { + return err + } + + err = errRes.Err() + + if err == nil { + err = fmt.Errorf("cannot format facebook response. expect an array but get an object.") + } + + return err + } + } + + return fmt.Errorf("cannot format facebook response. %v", err) + } + + return nil +} + +// Get gets a field from Result. +// +// Field can be a dot separated string. +// If field name is "a.b.c", it will try to return value of res["a"]["b"]["c"]. +// +// To access array items, use index value in field. +// For instance, field "a.0.c" means to read res["a"][0]["c"]. +// +// It doesn't work with Result which has a key contains dot. Use GetField in this case. +// +// Returns nil if field doesn't exist. +func (res Result) Get(field string) interface{} { + if field == "" { + return res + } + + f := strings.Split(field, ".") + return res.get(f) +} + +// GetField gets a field from Result. +// +// Arguments are treated as keys to access value in Result. +// If arguments are "a","b","c", it will try to return value of res["a"]["b"]["c"]. +// +// To access array items, use index value as a string. +// For instance, args of "a", "0", "c" means to read res["a"][0]["c"]. +// +// Returns nil if field doesn't exist. 
+func (res Result) GetField(fields ...string) interface{} { + if len(fields) == 0 { + return res + } + + return res.get(fields) +} + +func (res Result) get(fields []string) interface{} { + v, ok := res[fields[0]] + + if !ok || v == nil { + return nil + } + + if len(fields) == 1 { + return v + } + + value := getValueField(reflect.ValueOf(v), fields[1:]) + + if !value.IsValid() { + return nil + } + + return value.Interface() +} + +func getValueField(value reflect.Value, fields []string) reflect.Value { + valueType := value.Type() + kind := valueType.Kind() + field := fields[0] + + switch kind { + case reflect.Array, reflect.Slice: + // field must be a number. + n, err := strconv.ParseUint(field, 10, 0) + + if err != nil { + return reflect.Value{} + } + + if n >= uint64(value.Len()) { + return reflect.Value{} + } + + // work around a reflect package pitfall. + value = reflect.ValueOf(value.Index(int(n)).Interface()) + + case reflect.Map: + v := value.MapIndex(reflect.ValueOf(field)) + + if !v.IsValid() { + return v + } + + // get real value type. + value = reflect.ValueOf(v.Interface()) + + default: + return reflect.Value{} + } + + if len(fields) == 1 { + return value + } + + return getValueField(value, fields[1:]) +} + +// Decode decodes full result to a struct. +// It only decodes fields defined in the struct. +// +// As all facebook response fields are lower case strings, +// Decode will convert all camel-case field names to lower case string. +// e.g. field name "FooBar" will be converted to "foo_bar". +// The side effect is that if a struct has 2 fields with only capital +// differences, decoder will map these fields to a same result value. +// +// If a field is missing in the result, Decode keeps it unchanged by default. +// +// Decode can read struct field tag value to change default behavior. +// +// Examples: +// +// type Foo struct { +// // "id" must exist in response. note the leading comma. 
+// Id string `facebook:",required"` +// +// // use "name" as field name in response. +// TheName string `facebook:"name"` +// } +// +// To change default behavior, set a struct tag `facebook:",required"` to fields +// should not be missing. +// +// Returns error if v is not a struct or any required v field name absents in res. +func (res Result) Decode(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + + err = r.(error) + } + }() + + err = res.decode(reflect.ValueOf(v), "") + return +} + +// DecodeField decodes a field of result to any type, including struct. +// Field name format is defined in Result.Get(). +// +// More details about decoding struct see Result.Decode(). +func (res Result) DecodeField(field string, v interface{}) error { + f := res.Get(field) + + if f == nil { + return fmt.Errorf("field '%v' doesn't exist in result.", field) + } + + return decodeField(reflect.ValueOf(f), reflect.ValueOf(v), field) +} + +// Err returns an error if Result is a Graph API error. +// +// The returned error can be converted to Error by type assertion. +// err := res.Err() +// if err != nil { +// if e, ok := err.(*Error); ok { +// // read more details in e.Message, e.Code and e.Type +// } +// } +// +// For more information about Graph API Errors, see +// https://developers.facebook.com/docs/reference/api/errors/ +func (res Result) Err() error { + var err Error + e := res.DecodeField("error", &err) + + // no "error" in result. result is not an error. + if e != nil { + return nil + } + + // code may be missing in error. + // assign a non-zero value to it. + if err.Code == 0 { + err.Code = ERROR_CODE_UNKNOWN + } + + return &err +} + +// Paging creates a PagingResult for this Result and +// returns error if the Result cannot be used for paging. +// +// Facebook uses following JSON structure to response paging information. +// If "data" doesn't present in Result, Paging will return error. 
+// { +// "data": [...], +// "paging": { +// "previous": "https://graph.facebook.com/...", +// "next": "https://graph.facebook.com/..." +// } +// } +func (res Result) Paging(session *Session) (*PagingResult, error) { + return newPagingResult(session, res) +} + +// Batch creates a BatchResult for this result and +// returns error if the Result is not a batch api response. +// +// See BatchApi document for a sample usage. +func (res Result) Batch() (*BatchResult, error) { + return newBatchResult(res) +} + +// DebugInfo creates a DebugInfo for this result if this result +// has "__debug__" key. +func (res Result) DebugInfo() *DebugInfo { + var info Result + err := res.DecodeField(debugInfoKey, &info) + + if err != nil { + return nil + } + + debugInfo := &DebugInfo{} + info.DecodeField("messages", &debugInfo.Messages) + + if proto, ok := info[debugProtoKey]; ok { + if v, ok := proto.(string); ok { + debugInfo.Proto = v + } + } + + if header, ok := info[debugHeaderKey]; ok { + if v, ok := header.(http.Header); ok { + debugInfo.Header = v + + debugInfo.FacebookApiVersion = v.Get(facebookApiVersionHeader) + debugInfo.FacebookDebug = v.Get(facebookDebugHeader) + debugInfo.FacebookRev = v.Get(facebookRevHeader) + } + } + + return debugInfo +} + +func (res Result) decode(v reflect.Value, fullName string) error { + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + + if v.Kind() != reflect.Struct { + return fmt.Errorf("output value must be a struct.") + } + + if !v.CanSet() { + return fmt.Errorf("output value cannot be set.") + } + + if fullName != "" { + fullName += "." 
+ } + + var field reflect.Value + var name, fbTag string + var val interface{} + var ok, required bool + var err error + + vType := v.Type() + num := vType.NumField() + + for i := 0; i < num; i++ { + name = "" + required = false + field = v.Field(i) + fbTag = vType.Field(i).Tag.Get("facebook") + + // parse struct field tag + if fbTag != "" { + index := strings.IndexRune(fbTag, ',') + + if index == -1 { + name = fbTag + } else { + name = fbTag[:index] + + if fbTag[index:] == ",required" { + required = true + } + } + } + + if name == "" { + name = camelCaseToUnderScore(v.Type().Field(i).Name) + } + + val, ok = res[name] + + if !ok { + // check whether the field is required. if so, report error. + if required { + return fmt.Errorf("cannot find field '%v%v' in result.", fullName, name) + } + + continue + } + + if err = decodeField(reflect.ValueOf(val), field, fmt.Sprintf("%v%v", fullName, name)); err != nil { + return err + } + } + + return nil +} + +func decodeField(val reflect.Value, field reflect.Value, fullName string) error { + if field.Kind() == reflect.Ptr { + // reset Ptr field if val is nil. + if !val.IsValid() { + if !field.IsNil() && field.CanSet() { + field.Set(reflect.Zero(field.Type())) + } + + return nil + } + + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + + field = field.Elem() + } + + if !field.CanSet() { + return fmt.Errorf("field '%v' cannot be decoded. make sure the output value is able to be set.", fullName) + } + + if !val.IsValid() { + return fmt.Errorf("field '%v' is not a pointer. 
cannot assign nil to it.", fullName) + } + + kind := field.Kind() + valType := val.Type() + + switch kind { + case reflect.Bool: + if valType.Kind() == reflect.Bool { + field.SetBool(val.Bool()) + } else { + return fmt.Errorf("field '%v' is not a bool in result.", fullName) + } + + case reflect.Int8: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < -128 || n > 127 { + return fmt.Errorf("field '%v' value exceeds the range of int8.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 127 { + return fmt.Errorf("field '%v' value exceeds the range of int8.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < -128 || n > 127 { + return fmt.Errorf("field '%v' value exceeds the range of int8.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseInt(val.String(), 10, 8) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid int8.", fullName) + } + + field.SetInt(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Int16: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < -32768 || n > 32767 { + return fmt.Errorf("field '%v' value exceeds the range of int16.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 32767 { + return fmt.Errorf("field '%v' value exceeds the range of int16.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < -32768 || n > 32767 { + return fmt.Errorf("field '%v' value exceeds the range of int16.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseInt(val.String(), 10, 16) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid int16.", fullName) + } + + field.SetInt(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Int32: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < -2147483648 || n > 2147483647 { + return fmt.Errorf("field '%v' value exceeds the range of int32.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 2147483647 { + return fmt.Errorf("field '%v' value exceeds the range of int32.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < -2147483648 || n > 2147483647 { + return fmt.Errorf("field '%v' value exceeds the range of int32.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseInt(val.String(), 10, 32) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid int32.", fullName) + } + + field.SetInt(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Int64: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + field.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 9223372036854775807 { + return fmt.Errorf("field '%v' value exceeds the range of int64.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < -9223372036854775808 || n > 9223372036854775807 { + return fmt.Errorf("field '%v' value exceeds the range of int64.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseInt(val.String(), 10, 64) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid int64.", fullName) + } + + field.SetInt(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Int: + bits := field.Type().Bits() + + var min, max int64 + + if bits == 32 { + min = -2147483648 + max = 2147483647 + } else if bits == 64 { + min = -9223372036854775808 + max = 9223372036854775807 + } + + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < min || n > max { + return fmt.Errorf("field '%v' value exceeds the range of int.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > uint64(max) { + return fmt.Errorf("field '%v' value exceeds the range of int.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < float64(min) || n > float64(max) { + return fmt.Errorf("field '%v' value exceeds the range of int.", fullName) + } + + field.SetInt(int64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseInt(val.String(), 10, bits) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid int%v.", fullName, bits) + } + + field.SetInt(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Uint8: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < 0 || n > 0xFF { + return fmt.Errorf("field '%v' value exceeds the range of uint8.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 0xFF { + return fmt.Errorf("field '%v' value exceeds the range of uint8.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < 0 || n > 0xFF { + return fmt.Errorf("field '%v' value exceeds the range of uint8.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseUint(val.String(), 10, 8) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid uint8.", fullName) + } + + field.SetUint(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Uint16: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < 0 || n > 0xFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint16.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 0xFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint16.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < 0 || n > 0xFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint16.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseUint(val.String(), 10, 16) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid uint16.", fullName) + } + + field.SetUint(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Uint32: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < 0 || n > 0xFFFFFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint32.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > 0xFFFFFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint32.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < 0 || n > 0xFFFFFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint32.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseUint(val.String(), 10, 32) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid uint32.", fullName) + } + + field.SetUint(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Uint64: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < 0 { + return fmt.Errorf("field '%v' value exceeds the range of uint64.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + field.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < 0 || n > 0xFFFFFFFFFFFFFFFF { + return fmt.Errorf("field '%v' value exceeds the range of uint64.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseUint(val.String(), 10, 64) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid uint64.", fullName) + } + + field.SetUint(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Uint: + bits := field.Type().Bits() + + var max uint64 + + if bits == 32 { + max = 0xFFFFFFFF + } else if bits == 64 { + max = 0xFFFFFFFFFFFFFFFF + } + + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + + if n < 0 || uint64(n) > max { + return fmt.Errorf("field '%v' value exceeds the range of uint.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + + if n > max { + return fmt.Errorf("field '%v' value exceeds the range of uint.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + + if n < 0 || n > float64(max) { + return fmt.Errorf("field '%v' value exceeds the range of uint.", fullName) + } + + field.SetUint(uint64(n)) + + case reflect.String: + // only json.Number is allowed to be used as number. 
+ if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseUint(val.String(), 10, bits) + + if err != nil { + return fmt.Errorf("field '%v' value is not a valid uint%v.", fullName, bits) + } + + field.SetUint(n) + + default: + return fmt.Errorf("field '%v' is not an integer in result.", fullName) + } + + case reflect.Float32, reflect.Float64: + switch valType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := val.Int() + field.SetFloat(float64(n)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := val.Uint() + field.SetFloat(float64(n)) + + case reflect.Float32, reflect.Float64: + n := val.Float() + field.SetFloat(n) + + case reflect.String: + // only json.Number is allowed to be used as number. + if val.Type() != typeOfJSONNumber { + return fmt.Errorf("field '%v' value is string, not a number.", fullName) + } + + n, err := strconv.ParseFloat(val.String(), 64) + + if err != nil { + return fmt.Errorf("field '%v' is not a valid float64.", fullName) + } + + field.SetFloat(n) + + default: + return fmt.Errorf("field '%v' is not a float in result.", fullName) + } + + case reflect.String: + if valType.Kind() != reflect.String { + return fmt.Errorf("field '%v' is not a string in result.", fullName) + } + + field.SetString(val.String()) + + case reflect.Struct: + if field.Type().ConvertibleTo(typeOfTime) { + if valType.Kind() != reflect.String { + return fmt.Errorf("field '%v' is not a string in result.", fullName) + } + + t, err := time.Parse("2006-01-02T15:04:05-0700", val.String()) + + if err != nil { + return fmt.Errorf("field '%v' was unable to parse the time string '%s'.", fullName, val.String()) + } + + matchedType := reflect.ValueOf(t).Convert(field.Type()) + field.Set(matchedType) + return nil + } + + if valType.Kind() != reflect.Map || valType.Key().Kind() != reflect.String { + return 
fmt.Errorf("field '%v' is not a json object in result.", fullName) + } + + // safe convert val to Result. type assertion doesn't work in this case. + var r Result + reflect.ValueOf(&r).Elem().Set(val) + + if err := r.decode(field, fullName); err != nil { + return err + } + + case reflect.Map: + if valType.Kind() != reflect.Map || valType.Key().Kind() != reflect.String { + return fmt.Errorf("field '%v' is not a json object in result.", fullName) + } + + // map key must be string + if field.Type().Key().Kind() != reflect.String { + return fmt.Errorf("field '%v' in struct is a map with non-string key type. it's not allowed.", fullName) + } + + var needAddr bool + valueType := field.Type().Elem() + + // shortcut for map[string]interface{}. + if valueType.Kind() == reflect.Interface { + field.Set(val) + break + } + + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + needAddr = true + } + + for _, key := range val.MapKeys() { + // val.MapIndex(key) returns a Value with wrong type. + // use following trick to get correct Value. + value := reflect.ValueOf(val.MapIndex(key).Interface()) + newValue := reflect.New(valueType) + + if err := decodeField(value, newValue, fmt.Sprintf("%v.%v", fullName, key)); err != nil { + return err + } + + if needAddr { + field.SetMapIndex(key, newValue) + } else { + field.SetMapIndex(key, newValue.Elem()) + } + } + + case reflect.Slice, reflect.Array: + if valType.Kind() != reflect.Slice && valType.Kind() != reflect.Array { + return fmt.Errorf("field '%v' is not a json array in result.", fullName) + } + + valLen := val.Len() + + if kind == reflect.Array { + if field.Len() < valLen { + return fmt.Errorf("cannot copy all field '%v' values to struct. expected len is %v. 
actual len is %v.", + fullName, field.Len(), valLen) + } + } + + var slc reflect.Value + var needAddr bool + + valueType := field.Type().Elem() + + // shortcut for array of interface + if valueType.Kind() == reflect.Interface { + if kind == reflect.Array { + for i := 0; i < valLen; i++ { + field.Index(i).Set(val.Index(i)) + } + } else { // kind is slice + field.Set(val) + } + + break + } + + if kind == reflect.Array { + slc = field.Slice(0, valLen) + } else { + // kind is slice + slc = reflect.MakeSlice(field.Type(), valLen, valLen) + field.Set(slc) + } + + if valueType.Kind() == reflect.Ptr { + needAddr = true + valueType = valueType.Elem() + } + + for i := 0; i < valLen; i++ { + // val.Index(i) returns a Value with wrong type. + // use following trick to get correct Value. + valIndexValue := reflect.ValueOf(val.Index(i).Interface()) + newValue := reflect.New(valueType) + + if err := decodeField(valIndexValue, newValue, fmt.Sprintf("%v.%v", fullName, i)); err != nil { + return err + } + + if needAddr { + slc.Index(i).Set(newValue) + } else { + slc.Index(i).Set(newValue.Elem()) + } + } + + default: + return fmt.Errorf("field '%v' in struct uses unsupported type '%v'.", fullName, kind) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/session.go b/Godeps/_workspace/src/github.com/huandu/facebook/session.go new file mode 100644 index 000000000..95b4ad8d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/session.go @@ -0,0 +1,667 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "strings" +) + +// Makes a facebook graph api call. 
+// +// If session access token is set, "access_token" in params will be set to the token value. +// +// Returns facebook graph api call result. +// If facebook returns error in response, returns error details in res and set err. +func (session *Session) Api(path string, method Method, params Params) (Result, error) { + return session.graph(path, method, params) +} + +// Get is a short hand of Api(path, GET, params). +func (session *Session) Get(path string, params Params) (Result, error) { + return session.Api(path, GET, params) +} + +// Post is a short hand of Api(path, POST, params). +func (session *Session) Post(path string, params Params) (Result, error) { + return session.Api(path, POST, params) +} + +// Delete is a short hand of Api(path, DELETE, params). +func (session *Session) Delete(path string, params Params) (Result, error) { + return session.Api(path, DELETE, params) +} + +// Put is a short hand of Api(path, PUT, params). +func (session *Session) Put(path string, params Params) (Result, error) { + return session.Api(path, PUT, params) +} + +// Makes a batch call. Each params represent a single facebook graph api call. +// +// BatchApi supports most kinds of batch calls defines in facebook batch api document, +// except uploading binary data. Use Batch to upload binary data. +// +// If session access token is set, the token will be used in batch api call. +// +// Returns an array of batch call result on success. +// +// Facebook document: https://developers.facebook.com/docs/graph-api/making-multiple-requests +func (session *Session) BatchApi(params ...Params) ([]Result, error) { + return session.Batch(nil, params...) +} + +// Makes a batch facebook graph api call. +// Batch is designed for more advanced usage including uploading binary files. +// +// If session access token is set, "access_token" in batchParams will be set to the token value. 
+// +// Facebook document: https://developers.facebook.com/docs/graph-api/making-multiple-requests +func (session *Session) Batch(batchParams Params, params ...Params) ([]Result, error) { + return session.graphBatch(batchParams, params...) +} + +// Makes a FQL query. +// Returns a slice of Result. If there is no query result, the result is nil. +// +// Facebook document: https://developers.facebook.com/docs/technical-guides/fql#query +func (session *Session) FQL(query string) ([]Result, error) { + res, err := session.graphFQL(Params{ + "q": query, + }) + + if err != nil { + return nil, err + } + + // query result is stored in "data" field. + var data []Result + err = res.DecodeField("data", &data) + + if err != nil { + return nil, err + } + + return data, nil +} + +// Makes a multi FQL query. +// Returns a parsed Result. The key is the multi query key, and the value is the query result. +// +// Here is a multi-query sample. +// +// res, _ := session.MultiFQL(Params{ +// "query1": "SELECT name FROM user WHERE uid = me()", +// "query2": "SELECT uid1, uid2 FROM friend WHERE uid1 = me()", +// }) +// +// // Get query results from response. +// var query1, query2 []Result +// res.DecodeField("query1", &query1) +// res.DecodeField("query2", &query2) +// +// Facebook document: https://developers.facebook.com/docs/technical-guides/fql#multi +func (session *Session) MultiFQL(queries Params) (Result, error) { + res, err := session.graphFQL(Params{ + "q": queries, + }) + + if err != nil { + return res, err + } + + // query result is stored in "data" field. + var data []Result + err = res.DecodeField("data", &data) + + if err != nil { + return nil, err + } + + if data == nil { + return nil, fmt.Errorf("multi-fql result is not found.") + } + + // Multi-fql data structure is: + // { + // "data": [ + // { + // "name": "query1", + // "fql_result_set": [ + // {...}, {...}, ... + // ] + // }, + // { + // "name": "query2", + // "fql_result_set": [ + // {...}, {...}, ... 
+ // ] + // }, + // ... + // ] + // } + // + // Parse the structure to following go map. + // { + // "query1": [ + // // Come from field "fql_result_set". + // {...}, {...}, ... + // ], + // "query2": [ + // {...}, {...}, ... + // ], + // ... + // } + var name string + var apiResponse interface{} + var ok bool + result := Result{} + + for k, v := range data { + err = v.DecodeField("name", &name) + + if err != nil { + return nil, fmt.Errorf("missing required field 'name' in multi-query data.%v. %v", k, err) + } + + apiResponse, ok = v["fql_result_set"] + + if !ok { + return nil, fmt.Errorf("missing required field 'fql_result_set' in multi-query data.%v.", k) + } + + result[name] = apiResponse + } + + return result, nil +} + +// Makes an arbitrary HTTP request. +// It expects server responses a facebook Graph API response. +// request, _ := http.NewRequest("https://graph.facebook.com/538744468", "GET", nil) +// res, err := session.Request(request) +// fmt.Println(res["gender"]) // get "male" +func (session *Session) Request(request *http.Request) (res Result, err error) { + var response *http.Response + var data []byte + + response, data, err = session.sendRequest(request) + + if err != nil { + return + } + + res, err = MakeResult(data) + session.addDebugInfo(res, response) + + if res != nil { + err = res.Err() + } + + return +} + +// Gets current user id from access token. +// +// Returns error if access token is not set or invalid. +// +// It's a standard way to validate a facebook access token. +func (session *Session) User() (id string, err error) { + if session.id != "" { + id = session.id + return + } + + if session.accessToken == "" { + err = fmt.Errorf("access token is not set.") + return + } + + var result Result + result, err = session.Api("/me", GET, Params{"fields": "id"}) + + if err != nil { + return + } + + err = result.DecodeField("id", &id) + + if err != nil { + return + } + + return +} + +// Validates Session access token. 
+// Returns nil if access token is valid. +func (session *Session) Validate() (err error) { + if session.accessToken == "" { + err = fmt.Errorf("access token is not set.") + return + } + + var result Result + result, err = session.Api("/me", GET, Params{"fields": "id"}) + + if err != nil { + return + } + + if f := result.Get("id"); f == nil { + err = fmt.Errorf("invalid access token.") + return + } + + return +} + +// Inspect Session access token. +// Returns JSON array containing data about the inspected token. +// See https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow/v2.2#checktoken +func (session *Session) Inspect() (result Result, err error) { + if session.accessToken == "" { + err = fmt.Errorf("access token is not set.") + return + } + + if session.app == nil { + err = fmt.Errorf("cannot inspect access token without binding an app.") + return + } + + appAccessToken := session.app.AppAccessToken() + + if appAccessToken == "" { + err = fmt.Errorf("app access token is not set.") + return + } + + result, err = session.Api("/debug_token", GET, Params{ + "input_token": session.accessToken, + "access_token": appAccessToken, + }) + + if err != nil { + return + } + + // facebook stores everything, including error, inside result["data"]. + // make sure that result["data"] exists and doesn't contain error. + if _, ok := result["data"]; !ok { + err = fmt.Errorf("facebook inspect api returns unexpected result.") + return + } + + var data Result + result.DecodeField("data", &data) + result = data + err = result.Err() + return +} + +// Gets current access token. +func (session *Session) AccessToken() string { + return session.accessToken +} + +// Sets a new access token. +func (session *Session) SetAccessToken(token string) { + if token != session.accessToken { + session.id = "" + session.accessToken = token + session.appsecretProof = "" + } +} + +// Check appsecret proof is enabled or not. 
+func (session *Session) AppsecretProof() string { + if !session.enableAppsecretProof { + return "" + } + + if session.accessToken == "" || session.app == nil { + return "" + } + + if session.appsecretProof == "" { + hash := hmac.New(sha256.New, []byte(session.app.AppSecret)) + hash.Write([]byte(session.accessToken)) + session.appsecretProof = hex.EncodeToString(hash.Sum(nil)) + } + + return session.appsecretProof +} + +// Enable or disable appsecret proof status. +// Returns error if there is no App associasted with this Session. +func (session *Session) EnableAppsecretProof(enabled bool) error { + if session.app == nil { + return fmt.Errorf("cannot change appsecret proof status without an associated App.") + } + + if session.enableAppsecretProof != enabled { + session.enableAppsecretProof = enabled + + // reset pre-calculated proof here to give caller a way to do so in some rare case, + // e.g. associated app's secret is changed. + session.appsecretProof = "" + } + + return nil +} + +// Gets associated App. +func (session *Session) App() *App { + return session.app +} + +// Debug returns current debug mode. +func (session *Session) Debug() DebugMode { + if session.debug != DEBUG_OFF { + return session.debug + } + + return Debug +} + +// SetDebug updates per session debug mode and returns old mode. +// If per session debug mode is DEBUG_OFF, session will use global +// Debug mode. +func (session *Session) SetDebug(debug DebugMode) DebugMode { + old := session.debug + session.debug = debug + return old +} + +func (session *Session) graph(path string, method Method, params Params) (res Result, err error) { + var graphUrl string + + if params == nil { + params = Params{} + } + + // always format as json. + params["format"] = "json" + + // overwrite method as we always use post + params["method"] = method + + // get graph api url. 
+ if session.isVideoPost(path, method) { + graphUrl = session.getUrl("graph_video", path, nil) + } else { + graphUrl = session.getUrl("graph", path, nil) + } + + var response *http.Response + response, err = session.sendPostRequest(graphUrl, params, &res) + session.addDebugInfo(res, response) + + if res != nil { + err = res.Err() + } + + return +} + +func (session *Session) graphBatch(batchParams Params, params ...Params) ([]Result, error) { + if batchParams == nil { + batchParams = Params{} + } + + batchParams["batch"] = params + + var res []Result + graphUrl := session.getUrl("graph", "", nil) + _, err := session.sendPostRequest(graphUrl, batchParams, &res) + return res, err +} + +func (session *Session) graphFQL(params Params) (res Result, err error) { + if params == nil { + params = Params{} + } + + session.prepareParams(params) + + // encode url. + buf := &bytes.Buffer{} + buf.WriteString(domainMap["graph"]) + buf.WriteString("fql?") + _, err = params.Encode(buf) + + if err != nil { + return nil, fmt.Errorf("cannot encode params. %v", err) + } + + // it seems facebook disallow POST to /fql. always use GET for FQL. 
+ var response *http.Response + response, err = session.sendGetRequest(buf.String(), &res) + session.addDebugInfo(res, response) + + if res != nil { + err = res.Err() + } + + return +} + +func (session *Session) prepareParams(params Params) { + if _, ok := params["access_token"]; !ok && session.accessToken != "" { + params["access_token"] = session.accessToken + } + + if session.enableAppsecretProof && session.accessToken != "" && session.app != nil { + params["appsecret_proof"] = session.AppsecretProof() + } + + debug := session.Debug() + + if debug != DEBUG_OFF { + params["debug"] = debug + } +} + +func (session *Session) sendGetRequest(uri string, res interface{}) (*http.Response, error) { + request, err := http.NewRequest("GET", uri, nil) + + if err != nil { + return nil, err + } + + response, data, err := session.sendRequest(request) + + if err != nil { + return response, err + } + + err = makeResult(data, res) + return response, err +} + +func (session *Session) sendPostRequest(uri string, params Params, res interface{}) (*http.Response, error) { + session.prepareParams(params) + + buf := &bytes.Buffer{} + mime, err := params.Encode(buf) + + if err != nil { + return nil, fmt.Errorf("cannot encode POST params. %v", err) + } + + var request *http.Request + + request, err = http.NewRequest("POST", uri, buf) + + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", mime) + response, data, err := session.sendRequest(request) + + if err != nil { + return response, err + } + + err = makeResult(data, res) + return response, err +} + +func (session *Session) sendOauthRequest(uri string, params Params) (Result, error) { + urlStr := session.getUrl("graph", uri, nil) + buf := &bytes.Buffer{} + mime, err := params.Encode(buf) + + if err != nil { + return nil, fmt.Errorf("cannot encode POST params. 
%v", err) + } + + var request *http.Request + + request, err = http.NewRequest("POST", urlStr, buf) + + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", mime) + _, data, err := session.sendRequest(request) + + if err != nil { + return nil, err + } + + if len(data) == 0 { + return nil, fmt.Errorf("empty response from facebook") + } + + // facebook may return a query string. + if 'a' <= data[0] && data[0] <= 'z' { + query, err := url.ParseQuery(string(data)) + + if err != nil { + return nil, err + } + + // convert a query to Result. + res := Result{} + + for k := range query { + res[k] = query.Get(k) + } + + return res, nil + } + + res, err := MakeResult(data) + return res, err +} + +func (session *Session) sendRequest(request *http.Request) (response *http.Response, data []byte, err error) { + if session.HttpClient == nil { + response, err = http.DefaultClient.Do(request) + } else { + response, err = session.HttpClient.Do(request) + } + + if err != nil { + err = fmt.Errorf("cannot reach facebook server. %v", err) + return + } + + buf := &bytes.Buffer{} + _, err = io.Copy(buf, response.Body) + response.Body.Close() + + if err != nil { + err = fmt.Errorf("cannot read facebook response. %v", err) + } + + data = buf.Bytes() + return +} + +func (session *Session) isVideoPost(path string, method Method) bool { + return method == POST && regexpIsVideoPost.MatchString(path) +} + +func (session *Session) getUrl(name, path string, params Params) string { + offset := 0 + + if path != "" && path[0] == '/' { + offset = 1 + } + + buf := &bytes.Buffer{} + buf.WriteString(domainMap[name]) + + // facebook versioning. 
+ if session.Version == "" { + if Version != "" { + buf.WriteString(Version) + buf.WriteRune('/') + } + } else { + buf.WriteString(session.Version) + buf.WriteRune('/') + } + + buf.WriteString(string(path[offset:])) + + if params != nil { + buf.WriteRune('?') + params.Encode(buf) + } + + return buf.String() +} + +func (session *Session) addDebugInfo(res Result, response *http.Response) Result { + if session.Debug() == DEBUG_OFF || res == nil || response == nil { + return res + } + + debugInfo := make(map[string]interface{}) + + // save debug information in result directly. + res.DecodeField("__debug__", &debugInfo) + debugInfo[debugProtoKey] = response.Proto + debugInfo[debugHeaderKey] = response.Header + + res["__debug__"] = debugInfo + return res +} + +func decodeBase64URLEncodingString(data string) ([]byte, error) { + buf := bytes.NewBufferString(data) + + // go's URLEncoding implementation requires base64 padding. + if m := len(data) % 4; m != 0 { + buf.WriteString(strings.Repeat("=", 4-m)) + } + + reader := base64.NewDecoder(base64.URLEncoding, buf) + output := &bytes.Buffer{} + _, err := io.Copy(output, reader) + + if err != nil { + return nil, err + } + + return output.Bytes(), nil +} diff --git a/Godeps/_workspace/src/github.com/huandu/facebook/type.go b/Godeps/_workspace/src/github.com/huandu/facebook/type.go new file mode 100644 index 000000000..d09865415 --- /dev/null +++ b/Godeps/_workspace/src/github.com/huandu/facebook/type.go @@ -0,0 +1,127 @@ +// A facebook graph api client in go. +// https://github.com/huandu/facebook/ +// +// Copyright 2012 - 2015, Huan Du +// Licensed under the MIT license +// https://github.com/huandu/facebook/blob/master/LICENSE + +package facebook + +import ( + "io" + "net/http" +) + +// Holds facebook application information. +type App struct { + // Facebook app id + AppId string + + // Facebook app secret + AppSecret string + + // Facebook app redirect URI in the app's configuration. 
+ RedirectUri string + + // Enable appsecret proof in every API call to facebook. + // Facebook document: https://developers.facebook.com/docs/graph-api/securing-requests + EnableAppsecretProof bool +} + +// An interface to send http request. +// This interface is designed to be compatible with type `*http.Client`. +type HttpClient interface { + Do(req *http.Request) (resp *http.Response, err error) + Get(url string) (resp *http.Response, err error) + Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) +} + +// Holds a facebook session with an access token. +// Session should be created by App.Session or App.SessionFromSignedRequest. +type Session struct { + HttpClient HttpClient + Version string // facebook versioning. + + accessToken string // facebook access token. can be empty. + app *App + id string + + enableAppsecretProof bool // add "appsecret_proof" parameter in every facebook API call. + appsecretProof string // pre-calculated "appsecret_proof" value. + + debug DebugMode // using facebook debugging api in every request. +} + +// API HTTP method. +// Can be GET, POST or DELETE. +type Method string + +// Graph API debug mode. +// See https://developers.facebook.com/docs/graph-api/using-graph-api/v2.3#graphapidebugmode +type DebugMode string + +// API params. +// +// For general uses, just use Params as a ordinary map. +// +// For advanced uses, use MakeParams to create Params from any struct. +type Params map[string]interface{} + +// Facebook API call result. +type Result map[string]interface{} + +// Represents facebook API call result with paging information. +type PagingResult struct { + session *Session + paging pagingData + previous string + next string +} + +// Represents facebook batch API call result. +// See https://developers.facebook.com/docs/graph-api/making-multiple-requests/#multiple_methods. +type BatchResult struct { + StatusCode int // HTTP status code. + Header http.Header // HTTP response headers. 
+ Body string // Raw HTTP response body string. + Result Result // Facebook api result parsed from body. +} + +// Facebook API error. +type Error struct { + Message string + Type string + Code int + ErrorSubcode int // subcode for authentication related errors. +} + +// Binary data. +type binaryData struct { + Filename string // filename used in multipart form writer. + Source io.Reader // file data source. +} + +// Binary file. +type binaryFile struct { + Filename string // filename used in multipart form writer. + Path string // path to file. must be readable. +} + +// DebugInfo is the debug information returned by facebook when debug mode is enabled. +type DebugInfo struct { + Messages []DebugMessage // debug messages. it can be nil if there is no message. + Header http.Header // all HTTP headers for this response. + Proto string // HTTP protocol name for this response. + + // Facebook debug HTTP headers. + FacebookApiVersion string // the actual graph API version provided by facebook-api-version HTTP header. + FacebookDebug string // the X-FB-Debug HTTP header. + FacebookRev string // the x-fb-rev HTTP header. +} + +// DebugMessage is one debug message in "__debug__" of graph API response. 
+type DebugMessage struct { + Type string + Message string + Link string +} diff --git a/Godeps/_workspace/src/github.com/mssola/user_agent/.travis.yml b/Godeps/_workspace/src/github.com/mssola/user_agent/.travis.yml new file mode 100644 index 000000000..922091263 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mssola/user_agent/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip +matrix: + allow_failures: + - go: tip diff --git a/Godeps/_workspace/src/github.com/mssola/user_agent/LICENSE b/Godeps/_workspace/src/github.com/mssola/user_agent/LICENSE new file mode 100644 index 000000000..21d3935c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mssola/user_agent/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2015 Miquel Sabaté Solà + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/mssola/user_agent/all_test.go b/Godeps/_workspace/src/github.com/mssola/user_agent/all_test.go new file mode 100644 index 000000000..6dca19d5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/mssola/user_agent/all_test.go @@ -0,0 +1,257 @@ +// Copyright (C) 2012-2015 Miquel Sabaté Solà +// This file is licensed under the MIT license. +// See the LICENSE file. + +package user_agent + +import ( + "fmt" + "testing" +) + +// Slice that contains all the tests. Each test is contained in a struct +// that groups the name of the test and the User-Agent string to be tested. +var uastrings = []struct { + name string + ua string +}{ + // Bots + {"GoogleBot", "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"}, + {"GoogleBotSmartphone", "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"}, + {"BingBot", "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"}, + {"BaiduBot", "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"}, + {"Twitterbot", "Twitterbot"}, + {"YahooBot", "Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)"}, + {"FacebookExternalHit", "facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)"}, + {"FacebookPlatform", "facebookplatform/1.0 (+http://developers.facebook.com)"}, + {"FaceBot", "Facebot"}, + + // Internet Explorer + {"IE10", "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)"}, + {"Tablet", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.2; ARM; Trident/6.0; Touch; .NET4.0E; .NET4.0C; Tablet PC 2.0)"}, + {"Touch", "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0; Touch)"}, + {"Phone", "Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; SGH-i917)"}, + {"IE6", "Mozilla/4.0 (compatible; MSIE6.0; Windows NT 5.0; .NET CLR 1.1.4322)"}, + {"IE8Compatibility", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)"}, + {"IE10Compatibility", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)"}, + {"IE11Win81", "Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"}, + {"IE11Win7", "Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko"}, + {"IE11b32Win7b64", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"}, + {"IE11b32Win7b64MDDRJS", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; MDDRJS; rv:11.0) like Gecko"}, + {"IE11Compatibility", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0)"}, + + // Gecko + {"FirefoxMac", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b8) Gecko/20100101 Firefox/4.0b8"}, + {"FirefoxMacLoc", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13"}, + {"FirefoxLinux", "Mozilla/5.0 (X11; Linux 
x86_64; rv:17.0) Gecko/20100101 Firefox/17.0"}, + {"FirefoxWin", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14"}, + {"Firefox29Win7", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0"}, + {"CaminoMac", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.14) Gecko/20080409 Camino/1.6 (like Firefox/2.0.0.14)"}, + {"Iceweasel", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Iceweasel/2.0 (Debian-2.0+dfsg-1)"}, + {"SeaMonkey", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0"}, + {"AndroidFirefox", "Mozilla/5.0 (Android; Mobile; rv:17.0) Gecko/17.0 Firefox/17.0"}, + {"AndroidFirefoxTablet", "Mozilla/5.0 (Android; Tablet; rv:26.0) Gecko/26.0 Firefox/26.0"}, + {"FirefoxOS", "Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0"}, + {"FirefoxOSTablet", "Mozilla/5.0 (Tablet; rv:26.0) Gecko/26.0 Firefox/26.0"}, + {"FirefoxWinXP", "Mozilla/5.0 (Windows NT 5.2; rv:31.0) Gecko/20100101 Firefox/31.0"}, + {"FirefoxMRA", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:24.0) Gecko/20130405 MRA 5.5 (build 02842) Firefox/24.0 (.NET CLR 3.5.30729)"}, + + // Opera + {"OperaMac", "Opera/9.27 (Macintosh; Intel Mac OS X; U; en)"}, + {"OperaWin", "Opera/9.27 (Windows NT 5.1; U; en)"}, + {"OperaWinNoLocale", "Opera/9.80 (Windows NT 5.1) Presto/2.12.388 Version/12.10"}, + {"OperaWin2Comment", "Opera/9.80 (Windows NT 6.0; WOW64) Presto/2.12.388 Version/12.15"}, + {"OperaMinimal", "Opera/9.80"}, + {"OperaFull", "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.2.15 Version/10.10"}, + {"OperaLinux", "Opera/9.80 (X11; Linux x86_64) Presto/2.12.388 Version/12.10"}, + {"OperaAndroid", "Opera/9.80 (Android 4.2.1; Linux; Opera Mobi/ADR-1212030829) Presto/2.11.355 Version/12.10"}, + {"OperaNested", "Opera/9.80 (Windows NT 5.1; MRA 6.0 (build 5831)) Presto/2.12.388 Version/12.10"}, + {"OperaMRA", "Opera/9.80 (Windows NT 6.1; U; MRA 5.8 (build 4139); en) 
Presto/2.9.168 Version/11.50"}, + + // Other + {"Empty", ""}, + {"Nil", "nil"}, + {"Compatible", "Mozilla/4.0 (compatible)"}, + {"Mozilla", "Mozilla/5.0"}, + {"Amaya", "amaya/9.51 libwww/5.4.0"}, + {"Rails", "Rails Testing"}, + {"Python", "Python-urllib/2.7"}, + {"Curl", "curl/7.28.1"}, + + // WebKit + {"ChromeLinux", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11"}, + {"ChromeWin7", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19"}, + {"ChromeMinimal", "Mozilla/5.0 AppleWebKit/534.10 Chrome/8.0.552.215 Safari/534.10"}, + {"ChromeMac", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.231 Safari/534.10"}, + {"SafariMac", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16"}, + {"SafariWin", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en) AppleWebKit/526.9 (KHTML, like Gecko) Version/4.0dp1 Safari/526.8"}, + {"iPhone7", "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_3 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B511 Safari/9537.53"}, + {"iPhone", "Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419"}, + {"iPod", "Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419"}, + {"iPad", "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 Safari/531.21.10"}, + {"webOS", "Mozilla/5.0 (webOS/1.4.0; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.1"}, + {"Android", "Mozilla/5.0 (Linux; U; Android 1.5; de-; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"}, + {"BlackBerry", "Mozilla/5.0 (BlackBerry; U; BlackBerry 
9800; en) AppleWebKit/534.1+ (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1+"}, + {"BB10", "Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+"}, + {"Ericsson", "Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525"}, + {"ChromeAndroid", "Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19"}, + {"WebkitNoPlatform", "Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13"}, + {"OperaWebkitMobile", "Mozilla/5.0 (Linux; Android 4.2.2; Galaxy Nexus Build/JDQ39) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Mobile Safari/537.31 OPR/14.0.1074.57453"}, + {"OperaWebkitDesktop", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Safari/537.31 OPR/14.0.1074.57453"}, + {"ChromeNothingAfterU", "Mozilla/5.0 (Linux; U) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.79 Safari/537.4"}, + {"SafariOnSymbian", "Mozilla/5.0 (SymbianOS/9.1; U; [en-us]) AppleWebKit/413 (KHTML, like Gecko) Safari/413"}, +} + +// Slice of the expected results from the previous slice. +var expected = []string{ + // Bots + "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:false", + "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:true", + "Mozilla:5.0 Browser:bingbot-2.0 Bot:true Mobile:false", + "Mozilla:5.0 Browser:Baiduspider-2.0 Bot:true Mobile:false", + "Browser:Twitterbot Bot:true Mobile:false", + "Mozilla:5.0 Browser:Yahoo! 
Slurp Bot:true Mobile:false", + "Browser:facebookexternalhit-1.1 Bot:true Mobile:false", + "Browser:facebookplatform-1.0 Bot:true Mobile:false", + "Browser:Facebot Bot:true Mobile:false", + + // Internet Explorer + "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:4.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:4.0 Platform:Windows OS:Windows Phone OS 7.0 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:true", + "Mozilla:4.0 Platform:Windows OS:Windows 2000 Browser:Internet Explorer-6.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-8.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false", + "Mozilla:4.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:false", + + // Gecko + "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Browser:Firefox-4.0b8 Engine:Gecko-20100101 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:Firefox-3.6.13 Engine:Gecko-20101203 Bot:false Mobile:false", + "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Firefox-17.0 Engine:Gecko-20100101 Bot:false 
Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-2.0.0.14 Engine:Gecko-20080404 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Firefox-29.0 Engine:Gecko-20100101 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Camino-1.6 Engine:Gecko-20080409 Bot:false Mobile:false", + "Mozilla:5.0 Platform:X11 OS:Linux i686 Localization:en-US Browser:Iceweasel-2.0 Engine:Gecko-20061024 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:SeaMonkey-2.0 Engine:Gecko-20091017 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Mobile OS:Android Browser:Firefox-17.0 Engine:Gecko-17.0 Bot:false Mobile:true", + "Mozilla:5.0 Platform:Tablet OS:Android Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true", + "Mozilla:5.0 Platform:Mobile OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true", + "Mozilla:5.0 Platform:Tablet OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true", + "Mozilla:5.0 Platform:Windows OS:Windows XP x64 Edition Browser:Firefox-31.0 Engine:Gecko-20100101 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-24.0 Engine:Gecko-20130405 Bot:false Mobile:false", + + // Opera + "Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false", + "Platform:Windows OS:Windows XP Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false", + "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false", + "Platform:Windows OS:Windows Vista Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false", + "Browser:Opera-9.80 Engine:Presto Bot:false Mobile:false", + "Platform:Windows OS:Windows Vista Localization:en Browser:Opera-9.80 Engine:Presto-2.2.15 Bot:false Mobile:false", + "Platform:X11 
OS:Linux x86_64 Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false", + "Platform:Android 4.2.1 OS:Linux Browser:Opera-9.80 Engine:Presto-2.11.355 Bot:false Mobile:true", + "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false", + "Platform:Windows OS:Windows 7 Localization:en Browser:Opera-9.80 Engine:Presto-2.9.168 Bot:false Mobile:false", + + // Other + "Bot:false Mobile:false", + "Browser:nil Bot:false Mobile:false", + "Browser:Mozilla-4.0 Bot:false Mobile:false", + "Browser:Mozilla-5.0 Bot:false Mobile:false", + "Browser:amaya-9.51 Engine:libwww-5.4.0 Bot:false Mobile:false", + "Browser:Rails Engine:Testing Bot:false Mobile:false", + "Browser:Python-urllib-2.7 Bot:false Mobile:false", + "Browser:curl-7.28.1 Bot:false Mobile:false", + + // WebKit + "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chrome-23.0.1271.97 Engine:AppleWebKit-537.11 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Chrome-18.0.1025.168 Engine:AppleWebKit-535.19 Bot:false Mobile:false", + "Mozilla:5.0 Browser:Chrome-8.0.552.215 Engine:AppleWebKit-534.10 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_5 Localization:en-US Browser:Chrome-8.0.552.231 Engine:AppleWebKit-534.10 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_3 Localization:en-us Browser:Safari-5.0 Engine:AppleWebKit-533.16 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en Browser:Safari-4.0dp1 Engine:AppleWebKit-526.9 Bot:false Mobile:false", + "Mozilla:5.0 Platform:iPhone OS:CPU iPhone OS 7_0_3 like Mac OS X Browser:Safari-7.0 Engine:AppleWebKit-537.51.1 Bot:false Mobile:true", + "Mozilla:5.0 Platform:iPhone OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true", + "Mozilla:5.0 Platform:iPod OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false 
Mobile:true", + "Mozilla:5.0 Platform:iPad OS:CPU OS 3_2 like Mac OS X Localization:en-us Browser:Safari-4.0.4 Engine:AppleWebKit-531.21.10 Bot:false Mobile:true", + "Mozilla:5.0 Platform:webOS OS:Palm Localization:en-US Browser:webOS-1.0 Engine:AppleWebKit-532.2 Bot:false Mobile:true", + "Mozilla:5.0 Platform:Linux OS:Android 1.5 Localization:de- Browser:Android-3.1.2 Engine:AppleWebKit-528.5+ Bot:false Mobile:true", + "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry 9800 Localization:en Browser:BlackBerry-6.0.0.141 Engine:AppleWebKit-534.1+ Bot:false Mobile:true", + "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry Browser:BlackBerry-10.0.9.388 Engine:AppleWebKit-537.3+ Bot:false Mobile:true", + "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.4 Browser:Symbian-3.0 Engine:AppleWebKit-525 Bot:false Mobile:true", + "Mozilla:5.0 Platform:Linux OS:Android 4.2.1 Browser:Chrome-18.0.1025.166 Engine:AppleWebKit-535.19 Bot:false Mobile:true", + "Mozilla:5.0 Platform:en-us Localization:en-us Browser:Safari-3.1 Engine:AppleWebKit-525.13 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Linux OS:Android 4.2.2 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:true", + "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Linux OS:Linux Browser:Chrome-22.0.1229.79 Engine:AppleWebKit-537.4 Bot:false Mobile:false", + "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.1 Browser:Symbian-413 Engine:AppleWebKit-413 Bot:false Mobile:true", +} + +// Internal: beautify the UserAgent reference into a string so it can be +// tested later on. +// +// ua - a UserAgent reference. +// +// Returns a string that contains the beautified representation. 
// siteRegexp matches an http URL inside a comment field. It is compiled
// once at package load time; compiling it on every call (as before) paid
// the regexp-compilation cost for each parsed User-Agent.
var siteRegexp = regexp.MustCompile(`http://.+\.\w+`)

// getFromSite returns the name of the bot derived from a website found in
// the given comment fields. If no website is present, it returns "".
//
// For a long comment (three or more fields) the URL conventionally lives
// in the third field and the bot name in the second; for a short comment
// the URL itself (first field) is returned as the name.
func getFromSite(comment []string) string {
	if len(comment) == 0 {
		return ""
	}

	// Where we should check for the website: third field for large
	// comments, first field otherwise.
	idx := 2
	if len(comment) < 3 {
		idx = 0
	}

	// Pick the site.
	results := siteRegexp.FindStringSubmatch(comment[idx])
	if len(results) == 1 {
		// If it's a simple comment, just return the name of the site.
		if idx == 0 {
			return results[0]
		}

		// This is a large comment; usually the name will be in the
		// previous field of the comment.
		return strings.TrimSpace(comment[1])
	}
	return ""
}
+ reg, _ := regexp.Compile("(?i)bot") + if reg.Match([]byte(sections[0].name)) { + p.setSimple(sections[0].name, "", true) + return + } + + // Tough luck, let's try to see if it has a website in his comment. + if name := getFromSite(sections[0].comment); name != "" { + // First of all, this is a bot. Moreover, since it doesn't have the + // Mozilla string, we can assume that the name and the version are + // the ones from the first section. + p.setSimple(sections[0].name, sections[0].version, true) + return + } + + // At this point we are sure that this is not a bot, but some weirdo. + p.setSimple(sections[0].name, sections[0].version, false) + } else { + // Let's iterate over the available comments and check for a website. + for _, v := range sections { + if name := getFromSite(v.comment); name != "" { + // Ok, we've got a bot name. + results := strings.SplitN(name, "/", 2) + version := "" + if len(results) == 2 { + version = results[1] + } + p.setSimple(results[0], version, true) + return + } + } + + // We will assume that this is some other weird browser. + p.fixOther(sections) + } +} diff --git a/Godeps/_workspace/src/github.com/mssola/user_agent/browser.go b/Godeps/_workspace/src/github.com/mssola/user_agent/browser.go new file mode 100644 index 000000000..9fb27e5f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mssola/user_agent/browser.go @@ -0,0 +1,120 @@ +// Copyright (C) 2012-2015 Miquel Sabaté Solà +// This file is licensed under the MIT license. +// See the LICENSE file. + +package user_agent + +import ( + "regexp" + "strings" +) + +// A struct containing all the information that we might be +// interested from the browser. +type Browser struct { + // The name of the browser's engine. + Engine string + + // The version of the browser's engine. + EngineVersion string + + // The name of the browser. + Name string + + // The version of the browser. 
+ Version string +} + +// Extract all the information that we can get from the User-Agent string +// about the browser and update the receiver with this information. +// +// The function receives just one argument "sections", that contains the +// sections from the User-Agent string after being parsed. +func (p *UserAgent) detectBrowser(sections []section) { + slen := len(sections) + + if sections[0].name == "Opera" { + p.mozilla = "" + p.browser.Name = "Opera" + p.browser.Version = sections[0].version + p.browser.Engine = "Presto" + if slen > 1 { + p.browser.EngineVersion = sections[1].version + } + } else if slen > 1 { + engine := sections[1] + p.browser.Engine = engine.name + p.browser.EngineVersion = engine.version + if slen > 2 { + p.browser.Version = sections[2].version + if engine.name == "AppleWebKit" { + if sections[slen-1].name == "OPR" { + p.browser.Name = "Opera" + p.browser.Version = sections[slen-1].version + } else if sections[2].name == "Chrome" { + p.browser.Name = "Chrome" + } else { + p.browser.Name = "Safari" + } + } else if engine.name == "Gecko" { + name := sections[2].name + if name == "MRA" && slen > 4 { + name = sections[4].name + p.browser.Version = sections[4].version + } + p.browser.Name = name + } else if engine.name == "like" && sections[2].name == "Gecko" { + // This is the new user agent from Internet Explorer 11. + p.browser.Engine = "Trident" + p.browser.Name = "Internet Explorer" + reg, _ := regexp.Compile("^rv:(.+)$") + for _, c := range sections[0].comment { + version := reg.FindStringSubmatch(c) + if len(version) > 0 { + p.browser.Version = version[1] + return + } + } + p.browser.Version = "" + } + } + } else if slen == 1 && len(sections[0].comment) > 1 { + comment := sections[0].comment + if comment[0] == "compatible" && strings.HasPrefix(comment[1], "MSIE") { + p.browser.Engine = "Trident" + p.browser.Name = "Internet Explorer" + // The MSIE version may be reported as the compatibility version. 
+ // For IE 8 through 10, the Trident token is more accurate. + // http://msdn.microsoft.com/en-us/library/ie/ms537503(v=vs.85).aspx#VerToken + for _, v := range comment { + if strings.HasPrefix(v, "Trident/") { + switch v[8:] { + case "4.0": + p.browser.Version = "8.0" + case "5.0": + p.browser.Version = "9.0" + case "6.0": + p.browser.Version = "10.0" + } + break + } + } + // If the Trident token is not provided, fall back to MSIE token. + if p.browser.Version == "" { + p.browser.Version = strings.TrimSpace(comment[1][4:]) + } + } + } +} + +// Returns two strings. The first string is the name of the engine and the +// second one is the version of the engine. +func (p *UserAgent) Engine() (string, string) { + return p.browser.Engine, p.browser.EngineVersion +} + +// Returns two strings. The first string is the name of the browser and the +// second one is the version of the browser. +func (p *UserAgent) Browser() (string, string) { + return p.browser.Name, p.browser.Version +} diff --git a/Godeps/_workspace/src/github.com/mssola/user_agent/operating_systems.go b/Godeps/_workspace/src/github.com/mssola/user_agent/operating_systems.go new file mode 100644 index 000000000..2771b57e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mssola/user_agent/operating_systems.go @@ -0,0 +1,260 @@ +// Copyright (C) 2012-2015 Miquel Sabaté Solà +// This file is licensed under the MIT license. +// See the LICENSE file. + +package user_agent + +import "strings" + +// Normalize the name of the operating system. By now, this just +// affects to Windows. +// +// Returns a string containing the normalized name for the Operating System. 
+func normalizeOS(name string) string { + sp := strings.SplitN(name, " ", 3) + if len(sp) != 3 { + return name + } + + switch sp[2] { + case "5.0": + return "Windows 2000" + case "5.01": + return "Windows 2000, Service Pack 1 (SP1)" + case "5.1": + return "Windows XP" + case "5.2": + return "Windows XP x64 Edition" + case "6.0": + return "Windows Vista" + case "6.1": + return "Windows 7" + case "6.2": + return "Windows 8" + case "6.3": + return "Windows 8.1" + case "6.4": + return "Windows 10" + } + return name +} + +// Guess the OS, the localization and if this is a mobile device for a +// Webkit-powered browser. +// +// The first argument p is a reference to the current UserAgent and the second +// argument is a slice of strings containing the comment. +func webkit(p *UserAgent, comment []string) { + if p.platform == "webOS" { + p.browser.Name = p.platform + p.os = "Palm" + if len(comment) > 2 { + p.localization = comment[2] + } + p.mobile = true + } else if p.platform == "Symbian" { + p.mobile = true + p.browser.Name = p.platform + p.os = comment[0] + } else if p.platform == "Linux" { + p.mobile = true + if p.browser.Name == "Safari" { + p.browser.Name = "Android" + } + if len(comment) > 1 { + if comment[1] == "U" { + if len(comment) > 2 { + p.os = comment[2] + } else { + p.mobile = false + p.os = comment[0] + } + } else { + p.os = comment[1] + } + } + if len(comment) > 3 { + p.localization = comment[3] + } + } else if len(comment) > 0 { + if len(comment) > 3 { + p.localization = comment[3] + } + if strings.HasPrefix(comment[0], "Windows NT") { + p.os = normalizeOS(comment[0]) + } else if len(comment) < 2 { + p.localization = comment[0] + } else if len(comment) < 3 { + if !p.googleBot() { + p.os = normalizeOS(comment[1]) + } + } else { + p.os = normalizeOS(comment[2]) + } + if p.platform == "BlackBerry" { + p.browser.Name = p.platform + if p.os == "Touch" { + p.os = p.platform + } + } + } +} + +// Guess the OS, the localization and if this is a mobile device +// 
for a Gecko-powered browser. +// +// The first argument p is a reference to the current UserAgent and the second +// argument is a slice of strings containing the comment. +func gecko(p *UserAgent, comment []string) { + if len(comment) > 1 { + if comment[1] == "U" { + if len(comment) > 2 { + p.os = normalizeOS(comment[2]) + } else { + p.os = normalizeOS(comment[1]) + } + } else { + if p.platform == "Android" { + p.mobile = true + p.platform, p.os = normalizeOS(comment[1]), p.platform + } else if comment[0] == "Mobile" || comment[0] == "Tablet" { + p.mobile = true + p.os = "FirefoxOS" + } else { + if p.os == "" { + p.os = normalizeOS(comment[1]) + } + } + } + if len(comment) > 3 { + p.localization = comment[3] + } + } +} + +// Guess the OS, the localization and if this is a mobile device +// for Internet Explorer. +// +// The first argument p is a reference to the current UserAgent and the second +// argument is a slice of strings containing the comment. +func trident(p *UserAgent, comment []string) { + // Internet Explorer only runs on Windows. + p.platform = "Windows" + + // The OS can be set before to handle a new case in IE11. + if p.os == "" { + if len(comment) > 2 { + p.os = normalizeOS(comment[2]) + } else { + p.os = "Windows NT 4.0" + } + } + + // Last but not least, let's detect if it comes from a mobile device. + for _, v := range comment { + if strings.HasPrefix(v, "IEMobile") { + p.mobile = true + return + } + } +} + +// Guess the OS, the localization and if this is a mobile device +// for Opera. +// +// The first argument p is a reference to the current UserAgent and the second +// argument is a slice of strings containing the comment. 
+func opera(p *UserAgent, comment []string) { + slen := len(comment) + + if strings.HasPrefix(comment[0], "Windows") { + p.platform = "Windows" + p.os = normalizeOS(comment[0]) + if slen > 2 { + if slen > 3 && strings.HasPrefix(comment[2], "MRA") { + p.localization = comment[3] + } else { + p.localization = comment[2] + } + } + } else { + if strings.HasPrefix(comment[0], "Android") { + p.mobile = true + } + p.platform = comment[0] + if slen > 1 { + p.os = comment[1] + if slen > 3 { + p.localization = comment[3] + } + } else { + p.os = comment[0] + } + } +} + +// Given the comment of the first section of the UserAgent string, +// get the platform. +func getPlatform(comment []string) string { + if len(comment) > 0 { + if comment[0] != "compatible" { + if strings.HasPrefix(comment[0], "Windows") { + return "Windows" + } else if strings.HasPrefix(comment[0], "Symbian") { + return "Symbian" + } else if strings.HasPrefix(comment[0], "webOS") { + return "webOS" + } else if comment[0] == "BB10" { + return "BlackBerry" + } + return comment[0] + } + } + return "" +} + +// Detect some properties of the OS from the given section. +func (p *UserAgent) detectOS(s section) { + if s.name == "Mozilla" { + // Get the platform here. Be aware that IE11 provides a new format + // that is not backwards-compatible with previous versions of IE. + p.platform = getPlatform(s.comment) + if p.platform == "Windows" && len(s.comment) > 0 { + p.os = normalizeOS(s.comment[0]) + } + + // And finally get the OS depending on the engine. + switch p.browser.Engine { + case "": + p.undecided = true + case "Gecko": + gecko(p, s.comment) + case "AppleWebKit": + webkit(p, s.comment) + case "Trident": + trident(p, s.comment) + } + } else if s.name == "Opera" { + if len(s.comment) > 0 { + opera(p, s.comment) + } + } else { + // Check whether this is a bot or just a weird browser. + p.undecided = true + } +} + +// Returns a string containing the platform.. 
+func (p *UserAgent) Platform() string { + return p.platform +} + +// Returns a string containing the name of the Operating System. +func (p *UserAgent) OS() string { + return p.os +} + +// Returns a string containing the localization. +func (p *UserAgent) Localization() string { + return p.localization +} diff --git a/Godeps/_workspace/src/github.com/mssola/user_agent/user_agent.go b/Godeps/_workspace/src/github.com/mssola/user_agent/user_agent.go new file mode 100644 index 000000000..df1aa3b78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mssola/user_agent/user_agent.go @@ -0,0 +1,169 @@ +// Copyright (C) 2012-2015 Miquel Sabaté Solà +// This file is licensed under the MIT license. +// See the LICENSE file. + +// Package user_agent implements an HTTP User Agent string parser. It defines +// the type UserAgent that contains all the information from the parsed string. +// It also implements the Parse function and getters for all the relevant +// information that has been extracted from a parsed User Agent string. +package user_agent + +import ( + "strings" +) + +// A section contains the name of the product, its version and +// an optional comment. +type section struct { + name string + version string + comment []string +} + +// The UserAgent struct contains all the info that can be extracted +// from the User-Agent string. +type UserAgent struct { + ua string + mozilla string + platform string + os string + localization string + browser Browser + bot bool + mobile bool + undecided bool +} + +// Read from the given string until the given delimiter or the +// end of the string have been reached. +// +// The first argument is the user agent string being parsed. The second +// argument is a reference pointing to the current index of the user agent +// string. The delimiter argument specifies which character is the delimiter +// and the cat argument determines whether nested '(' should be ignored or not. 
+// +// Returns an array of bytes containing what has been read. +func readUntil(ua string, index *int, delimiter byte, cat bool) []byte { + var buffer []byte + + i := *index + catalan := 0 + for ; i < len(ua); i = i + 1 { + if ua[i] == delimiter { + if catalan == 0 { + *index = i + 1 + return buffer + } + catalan-- + } else if cat && ua[i] == '(' { + catalan++ + } + buffer = append(buffer, ua[i]) + } + *index = i + 1 + return buffer +} + +// Parse the given product, that is, just a name or a string +// formatted as Name/Version. +// +// It returns two strings. The first string is the name of the product and the +// second string contains the version of the product. +func parseProduct(product []byte) (string, string) { + prod := strings.SplitN(string(product), "/", 2) + if len(prod) == 2 { + return prod[0], prod[1] + } + return string(product), "" +} + +// Parse a section. A section is typically formatted as follows +// "Name/Version (comment)". Both, the comment and the version are optional. +// +// The first argument is the user agent string being parsed. The second +// argument is a reference pointing to the current index of the user agent +// string. +// +// Returns a section containing the information that we could extract +// from the last parsed section. +func parseSection(ua string, index *int) (s section) { + buffer := readUntil(ua, index, ' ', false) + + s.name, s.version = parseProduct(buffer) + if *index < len(ua) && ua[*index] == '(' { + *index++ + buffer = readUntil(ua, index, ')', true) + s.comment = strings.Split(string(buffer), "; ") + *index++ + } + return s +} + +// Initialize the parser. +func (p *UserAgent) initialize() { + p.ua = "" + p.mozilla = "" + p.platform = "" + p.os = "" + p.localization = "" + p.browser.Engine = "" + p.browser.EngineVersion = "" + p.browser.Name = "" + p.browser.Version = "" + p.bot = false + p.mobile = false + p.undecided = false +} + +// Parse the given User-Agent string and get the resulting UserAgent object. 
+// +// Returns an UserAgent object that has been initialized after parsing +// the given User-Agent string. +func New(ua string) *UserAgent { + o := &UserAgent{} + o.Parse(ua) + return o +} + +// Parse the given User-Agent string. After calling this function, the +// receiver will be setted up with all the information that we've extracted. +func (p *UserAgent) Parse(ua string) { + var sections []section + + p.initialize() + p.ua = ua + for index, limit := 0, len(ua); index < limit; { + s := parseSection(ua, &index) + if !p.mobile && s.name == "Mobile" { + p.mobile = true + } + sections = append(sections, s) + } + + if len(sections) > 0 { + p.mozilla = sections[0].version + + p.detectBrowser(sections) + p.detectOS(sections[0]) + + if p.undecided { + p.checkBot(sections) + } + } +} + +// Returns the mozilla version (it's how the User Agent string begins: +// "Mozilla/5.0 ...", unless we're dealing with Opera, of course). +func (p *UserAgent) Mozilla() string { + return p.mozilla +} + +// Returns true if it's a bot, false otherwise. +func (p *UserAgent) Bot() bool { + return p.bot +} + +// Returns true if it's a mobile device, false otherwise. 
+func (p *UserAgent) Mobile() bool { + return p.mobile +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/.travis.yml b/Godeps/_workspace/src/github.com/nfnt/resize/.travis.yml new file mode 100644 index 000000000..57bd4a76e --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - tip diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/LICENSE b/Godeps/_workspace/src/github.com/nfnt/resize/LICENSE new file mode 100644 index 000000000..7836cad5f --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/converter.go b/Godeps/_workspace/src/github.com/nfnt/resize/converter.go new file mode 100644 index 000000000..84bd284ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/converter.go @@ -0,0 +1,452 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import "image" + +// Keep value in [0,255] range. +func clampUint8(in int32) uint8 { + // casting a negative int to an uint will result in an overflown + // large uint. this behavior will be exploited here and in other functions + // to achieve a higher performance. + if uint32(in) < 256 { + return uint8(in) + } + if in > 255 { + return 255 + } + return 0 +} + +// Keep value in [0,65535] range. +func clampUint16(in int64) uint16 { + if uint64(in) < 65536 { + return uint16(in) + } + if in > 65535 { + return 65535 + } + return 0 +} + +func resizeGeneric(in image.Image, out *image.NRGBA64, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA() + + // reverse alpha-premultiplication. 
+ if a != 0 { + r *= 0xffff + r /= a + g *= 0xffff + g /= a + b *= 0xffff + b /= a + } + + rgba[0] += int64(coeff) * int64(r) + rgba[1] += int64(coeff) * int64(g) + rgba[2] += int64(coeff) * int64(b) + rgba[3] += int64(coeff) * int64(a) + sum += int64(coeff) + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := clampUint16(rgba[0] / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + value = clampUint16(rgba[1] / sum) + out.Pix[offset+2] = uint8(value >> 8) + out.Pix[offset+3] = uint8(value) + value = clampUint16(rgba[2] / sum) + out.Pix[offset+4] = uint8(value >> 8) + out.Pix[offset+5] = uint8(value) + value = clampUint16(rgba[3] / sum) + out.Pix[offset+6] = uint8(value >> 8) + out.Pix[offset+7] = uint8(value) + } + } +} + +func resizeRGBA(in *image.RGBA, out *image.NRGBA, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + + r := uint32(row[xi+0]) + g := uint32(row[xi+1]) + b := uint32(row[xi+2]) + a := uint32(row[xi+3]) + + // reverse alpha-premultiplication. 
+ if a != 0 { + r *= 0xffff + r /= a + g *= 0xffff + g /= a + b *= 0xffff + b /= a + } + + rgba[0] += int32(coeff) * int32(r) + rgba[1] += int32(coeff) * int32(g) + rgba[2] += int32(coeff) * int32(b) + rgba[3] += int32(coeff) * int32(a) + sum += int32(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = clampUint8(rgba[0] / sum) + out.Pix[xo+1] = clampUint8(rgba[1] / sum) + out.Pix[xo+2] = clampUint8(rgba[2] / sum) + out.Pix[xo+3] = clampUint8(rgba[3] / sum) + } + } +} + +func resizeNRGBA(in *image.NRGBA, out *image.NRGBA, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + rgba[0] += int32(coeff) * int32(row[xi+0]) + rgba[1] += int32(coeff) * int32(row[xi+1]) + rgba[2] += int32(coeff) * int32(row[xi+2]) + rgba[3] += int32(coeff) * int32(row[xi+3]) + sum += int32(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = clampUint8(rgba[0] / sum) + out.Pix[xo+1] = clampUint8(rgba[1] / sum) + out.Pix[xo+2] = clampUint8(rgba[2] / sum) + out.Pix[xo+3] = clampUint8(rgba[3] / sum) + } + } +} + +func resizeRGBA64(in *image.RGBA64, out *image.NRGBA64, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int64 + var sum int64 + start := offset[y] + ci := y * filterLength 
+ for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + + r := uint32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + g := uint32(uint16(row[xi+2])<<8 | uint16(row[xi+3])) + b := uint32(uint16(row[xi+4])<<8 | uint16(row[xi+5])) + a := uint32(uint16(row[xi+6])<<8 | uint16(row[xi+7])) + + // reverse alpha-premultiplication. + if a != 0 { + r *= 0xffff + r /= a + g *= 0xffff + g /= a + b *= 0xffff + b /= a + } + + rgba[0] += int64(coeff) * int64(r) + rgba[1] += int64(coeff) * int64(g) + rgba[2] += int64(coeff) * int64(b) + rgba[3] += int64(coeff) * int64(a) + sum += int64(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := clampUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = clampUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = clampUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = clampUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func resizeNRGBA64(in *image.NRGBA64, out *image.NRGBA64, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + rgba[0] += int64(coeff) * int64(uint16(row[xi+0])<<8|uint16(row[xi+1])) + rgba[1] += int64(coeff) * int64(uint16(row[xi+2])<<8|uint16(row[xi+3])) 
+ rgba[2] += int64(coeff) * int64(uint16(row[xi+4])<<8|uint16(row[xi+5])) + rgba[3] += int64(coeff) * int64(uint16(row[xi+6])<<8|uint16(row[xi+7])) + sum += int64(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := clampUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = clampUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = clampUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = clampUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func resizeGray(in *image.Gray, out *image.Gray, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[(x-newBounds.Min.X)*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + gray += int32(coeff) * int32(row[xi]) + sum += int32(coeff) + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X) + out.Pix[offset] = clampUint8(gray / sum) + } + } +} + +func resizeGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 2 
+ case xi >= maxX: + xi = 2 * maxX + default: + xi = 0 + } + gray += int64(coeff) * int64(uint16(row[xi+0])<<8|uint16(row[xi+1])) + sum += int64(coeff) + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2 + value := clampUint16(gray / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + } + } +} + +func resizeYCbCr(in *ycc, out *ycc, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var p [3]int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 3 + case xi >= maxX: + xi = 3 * maxX + default: + xi = 0 + } + p[0] += int32(coeff) * int32(row[xi+0]) + p[1] += int32(coeff) * int32(row[xi+1]) + p[2] += int32(coeff) * int32(row[xi+2]) + sum += int32(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3 + out.Pix[xo+0] = clampUint8(p[0] / sum) + out.Pix[xo+1] = clampUint8(p[1] / sum) + out.Pix[xo+2] = clampUint8(p[2] / sum) + } + } +} + +func nearestYCbCr(in *ycc, out *ycc, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var p [3]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 3 + case xi >= maxX: + xi = 3 * maxX + default: + xi = 0 + } + p[0] += float32(row[xi+0]) + p[1] += float32(row[xi+1]) + p[2] += float32(row[xi+2]) + sum++ + } + } + + xo := 
(y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3 + out.Pix[xo+0] = floatToUint8(p[0] / sum) + out.Pix[xo+1] = floatToUint8(p[1] / sum) + out.Pix[xo+2] = floatToUint8(p[2] / sum) + } + } +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/converter_test.go b/Godeps/_workspace/src/github.com/nfnt/resize/converter_test.go new file mode 100644 index 000000000..85639efc2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/converter_test.go @@ -0,0 +1,43 @@ +package resize + +import ( + "testing" +) + +func Test_ClampUint8(t *testing.T) { + var testData = []struct { + in int32 + expected uint8 + }{ + {0, 0}, + {255, 255}, + {128, 128}, + {-2, 0}, + {256, 255}, + } + for _, test := range testData { + actual := clampUint8(test.in) + if actual != test.expected { + t.Fail() + } + } +} + +func Test_ClampUint16(t *testing.T) { + var testData = []struct { + in int64 + expected uint16 + }{ + {0, 0}, + {65535, 65535}, + {128, 128}, + {-2, 0}, + {65536, 65535}, + } + for _, test := range testData { + actual := clampUint16(test.in) + if actual != test.expected { + t.Fail() + } + } +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/filters.go b/Godeps/_workspace/src/github.com/nfnt/resize/filters.go new file mode 100644 index 000000000..4ce04e389 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/filters.go @@ -0,0 +1,143 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import ( + "math" +) + +func nearest(in float64) float64 { + if in >= -0.5 && in < 0.5 { + return 1 + } + return 0 +} + +func linear(in float64) float64 { + in = math.Abs(in) + if in <= 1 { + return 1 - in + } + return 0 +} + +func cubic(in float64) float64 { + in = math.Abs(in) + if in <= 1 { + return in*in*(1.5*in-2.5) + 1.0 + } + if in <= 2 { + return in*(in*(2.5-0.5*in)-4.0) + 2.0 + } + return 0 +} + +func mitchellnetravali(in float64) float64 { + in = math.Abs(in) + if in <= 1 { + return (7.0*in*in*in - 12.0*in*in + 5.33333333333) * 0.16666666666 + } + if in <= 2 { + return (-2.33333333333*in*in*in + 12.0*in*in - 20.0*in + 10.6666666667) * 0.16666666666 + } + return 0 +} + +func sinc(x float64) float64 { + x = math.Abs(x) * math.Pi + if x >= 1.220703e-4 { + return math.Sin(x) / x + } + return 1 +} + +func lanczos2(in float64) float64 { + if in > -2 && in < 2 { + return sinc(in) * sinc(in*0.5) + } + return 0 +} + +func lanczos3(in float64) float64 { + if in > -3 && in < 3 { + return sinc(in) * sinc(in*0.3333333333333333) + } + return 0 +} + +// range [-256,256] +func createWeights8(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int16, []int, int) { + filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1)) + filterFactor := math.Min(1./(blur*scale), 1) + + coeffs := make([]int16, dy*filterLength) + start := make([]int, dy) + for y := 0; y < dy; y++ { + interpX := scale*(float64(y)+0.5) - 0.5 + start[y] = int(interpX) - filterLength/2 + 1 + interpX -= float64(start[y]) + for i := 0; i < filterLength; i++ { + in := (interpX - float64(i)) * filterFactor + coeffs[y*filterLength+i] = 
int16(kernel(in) * 256) + } + } + + return coeffs, start, filterLength +} + +// range [-65536,65536] +func createWeights16(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int32, []int, int) { + filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1)) + filterFactor := math.Min(1./(blur*scale), 1) + + coeffs := make([]int32, dy*filterLength) + start := make([]int, dy) + for y := 0; y < dy; y++ { + interpX := scale*(float64(y)+0.5) - 0.5 + start[y] = int(interpX) - filterLength/2 + 1 + interpX -= float64(start[y]) + for i := 0; i < filterLength; i++ { + in := (interpX - float64(i)) * filterFactor + coeffs[y*filterLength+i] = int32(kernel(in) * 65536) + } + } + + return coeffs, start, filterLength +} + +func createWeightsNearest(dy, filterLength int, blur, scale float64) ([]bool, []int, int) { + filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1)) + filterFactor := math.Min(1./(blur*scale), 1) + + coeffs := make([]bool, dy*filterLength) + start := make([]int, dy) + for y := 0; y < dy; y++ { + interpX := scale*(float64(y)+0.5) - 0.5 + start[y] = int(interpX) - filterLength/2 + 1 + interpX -= float64(start[y]) + for i := 0; i < filterLength; i++ { + in := (interpX - float64(i)) * filterFactor + if in >= -0.5 && in < 0.5 { + coeffs[y*filterLength+i] = true + } else { + coeffs[y*filterLength+i] = false + } + } + } + + return coeffs, start, filterLength +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/nearest.go b/Godeps/_workspace/src/github.com/nfnt/resize/nearest.go new file mode 100644 index 000000000..888039d85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/nearest.go @@ -0,0 +1,318 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import "image" + +func floatToUint8(x float32) uint8 { + // Nearest-neighbor values are always + // positive no need to check lower-bound. + if x > 0xfe { + return 0xff + } + return uint8(x) +} + +func floatToUint16(x float32) uint16 { + if x > 0xfffe { + return 0xffff + } + return uint16(x) +} + +func nearestGeneric(in image.Image, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA() + rgba[0] += float32(r) + rgba[1] += float32(g) + rgba[2] += float32(b) + rgba[3] += float32(a) + sum++ + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := floatToUint16(rgba[0] / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + value = floatToUint16(rgba[1] / sum) + out.Pix[offset+2] = uint8(value >> 8) + out.Pix[offset+3] = uint8(value) + value = floatToUint16(rgba[2] / sum) + out.Pix[offset+4] = uint8(value >> 8) + out.Pix[offset+5] = uint8(value) + value = floatToUint16(rgba[3] / sum) + out.Pix[offset+6] = 
uint8(value >> 8) + out.Pix[offset+7] = uint8(value) + } + } +} + +func nearestRGBA(in *image.RGBA, out *image.RGBA, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + rgba[0] += float32(row[xi+0]) + rgba[1] += float32(row[xi+1]) + rgba[2] += float32(row[xi+2]) + rgba[3] += float32(row[xi+3]) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = floatToUint8(rgba[0] / sum) + out.Pix[xo+1] = floatToUint8(rgba[1] / sum) + out.Pix[xo+2] = floatToUint8(rgba[2] / sum) + out.Pix[xo+3] = floatToUint8(rgba[3] / sum) + } + } +} + +func nearestNRGBA(in *image.NRGBA, out *image.NRGBA, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + rgba[0] += float32(row[xi+0]) + rgba[1] += float32(row[xi+1]) + rgba[2] += float32(row[xi+2]) + rgba[3] += float32(row[xi+3]) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = floatToUint8(rgba[0] / sum) + out.Pix[xo+1] = floatToUint8(rgba[1] / sum) + out.Pix[xo+2] = floatToUint8(rgba[2] / sum) + 
out.Pix[xo+3] = floatToUint8(rgba[3] / sum) + } + } +} + +func nearestRGBA64(in *image.RGBA64, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3])) + rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5])) + rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7])) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := floatToUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = floatToUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = floatToUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = floatToUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func nearestNRGBA64(in *image.NRGBA64, out *image.NRGBA64, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + 
default: + xi = 0 + } + rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3])) + rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5])) + rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7])) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := floatToUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = floatToUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = floatToUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = floatToUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func nearestGray(in *image.Gray, out *image.Gray, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + gray += float32(row[xi]) + sum++ + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X) + out.Pix[offset] = floatToUint8(gray / sum) + } + } +} + +func nearestGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { 
+ case uint(xi) < uint(maxX): + xi *= 2 + case xi >= maxX: + xi = 2 * maxX + default: + xi = 0 + } + gray += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + sum++ + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2 + value := floatToUint16(gray / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + } + } +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/nearest_test.go b/Godeps/_workspace/src/github.com/nfnt/resize/nearest_test.go new file mode 100644 index 000000000..d4a76dda5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/nearest_test.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
+*/ + +package resize + +import "testing" + +func Test_FloatToUint8(t *testing.T) { + var testData = []struct { + in float32 + expected uint8 + }{ + {0, 0}, + {255, 255}, + {128, 128}, + {1, 1}, + {256, 255}, + } + for _, test := range testData { + actual := floatToUint8(test.in) + if actual != test.expected { + t.Fail() + } + } +} + +func Test_FloatToUint16(t *testing.T) { + var testData = []struct { + in float32 + expected uint16 + }{ + {0, 0}, + {65535, 65535}, + {128, 128}, + {1, 1}, + {65536, 65535}, + } + for _, test := range testData { + actual := floatToUint16(test.in) + if actual != test.expected { + t.Fail() + } + } +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/resize.go b/Godeps/_workspace/src/github.com/nfnt/resize/resize.go new file mode 100644 index 000000000..4d4ff6e3e --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/resize.go @@ -0,0 +1,614 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +// Package resize implements various image resizing methods. +// +// The package works with the Image interface described in the image package. +// Various interpolation methods are provided and multiple processors may be +// utilized in the computations. 
+// +// Example: +// imgResized := resize.Resize(1000, 0, imgOld, resize.MitchellNetravali) +package resize + +import ( + "image" + "runtime" + "sync" +) + +// An InterpolationFunction provides the parameters that describe an +// interpolation kernel. It returns the number of samples to take +// and the kernel function to use for sampling. +type InterpolationFunction int + +// InterpolationFunction constants +const ( + // Nearest-neighbor interpolation + NearestNeighbor InterpolationFunction = iota + // Bilinear interpolation + Bilinear + // Bicubic interpolation (with cubic hermite spline) + Bicubic + // Mitchell-Netravali interpolation + MitchellNetravali + // Lanczos interpolation (a=2) + Lanczos2 + // Lanczos interpolation (a=3) + Lanczos3 +) + +// kernal, returns an InterpolationFunctions taps and kernel. +func (i InterpolationFunction) kernel() (int, func(float64) float64) { + switch i { + case Bilinear: + return 2, linear + case Bicubic: + return 4, cubic + case MitchellNetravali: + return 4, mitchellnetravali + case Lanczos2: + return 4, lanczos2 + case Lanczos3: + return 6, lanczos3 + default: + // Default to NearestNeighbor. + return 2, nearest + } +} + +// values <1 will sharpen the image +var blur = 1.0 + +// Resize scales an image to new width and height using the interpolation function interp. +// A new image with the given dimensions will be returned. +// If one of the parameters width or height is set to 0, its size will be calculated so that +// the aspect ratio is that of the originating image. +// The resizing algorithm uses channels for parallel computation. 
+func Resize(width, height uint, img image.Image, interp InterpolationFunction) image.Image { + scaleX, scaleY := calcFactors(width, height, float64(img.Bounds().Dx()), float64(img.Bounds().Dy())) + if width == 0 { + width = uint(0.7 + float64(img.Bounds().Dx())/scaleX) + } + if height == 0 { + height = uint(0.7 + float64(img.Bounds().Dy())/scaleY) + } + + // Trivial case: return input image + if int(width) == img.Bounds().Dx() && int(height) == img.Bounds().Dy() { + return img + } + + if interp == NearestNeighbor { + return resizeNearest(width, height, scaleX, scaleY, img, interp) + } + + taps, kernel := interp.kernel() + cpus := runtime.GOMAXPROCS(0) + wg := sync.WaitGroup{} + + // Generic access to image.Image is slow in tight loops. + // The optimal access has to be determined from the concrete image type. + switch input := img.(type) { + case *image.RGBA: + // 8-bit precision + temp := image.NewNRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeNRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA: + // 8-bit precision + temp := image.NewNRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA(image.Rect(0, 0, int(width), 
int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeNRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeNRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + + case *image.YCbCr: + // 8-bit precision + // accessing the YCbCr arrays in a tight loop is slow. + // converting the image to ycc increases performance by 2x. + temp := newYCC(image.Rect(0, 0, input.Bounds().Dy(), int(width)), input.SubsampleRatio) + result := newYCC(image.Rect(0, 0, int(width), int(height)), input.SubsampleRatio) + + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + in := imageYCbCrToYCC(input) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*ycc) + go func() { + defer wg.Done() + resizeYCbCr(in, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*ycc) + go func() { + defer wg.Done() + resizeYCbCr(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result.YCbCr() + case *image.RGBA64: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // 
horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA64: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray: + // 8-bit precision + temp := image.NewGray(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed 
temporary image + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + resizeGray(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + resizeGray(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray16: + // 16-bit precision + temp := image.NewGray16(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray16(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + resizeGray16(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + resizeGray16(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + default: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, img.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := 
createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeGeneric(img, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + } +} + +func resizeNearest(width, height uint, scaleX, scaleY float64, img image.Image, interp InterpolationFunction) image.Image { + taps, _ := interp.kernel() + cpus := runtime.GOMAXPROCS(0) + wg := sync.WaitGroup{} + + switch input := img.(type) { + case *image.RGBA: + // 8-bit precision + temp := image.NewRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewRGBA(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.RGBA) + go func() { + defer wg.Done() + nearestRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.RGBA) + go func() { + defer wg.Done() + nearestRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA: + // 8-bit precision + temp := image.NewNRGBA(image.Rect(0, 0, 
input.Bounds().Dy(), int(width))) + result := image.NewNRGBA(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + nearestNRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + nearestNRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.YCbCr: + // 8-bit precision + // accessing the YCbCr arrays in a tight loop is slow. + // converting the image to ycc increases performance by 2x. 
+ temp := newYCC(image.Rect(0, 0, input.Bounds().Dy(), int(width)), input.SubsampleRatio) + result := newYCC(image.Rect(0, 0, int(width), int(height)), input.SubsampleRatio) + + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + in := imageYCbCrToYCC(input) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*ycc) + go func() { + defer wg.Done() + nearestYCbCr(in, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*ycc) + go func() { + defer wg.Done() + nearestYCbCr(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result.YCbCr() + case *image.RGBA64: + // 16-bit precision + temp := image.NewRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA64: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), 
int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + nearestNRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + nearestNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray: + // 8-bit precision + temp := image.NewGray(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + nearestGray(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + nearestGray(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray16: + // 16-bit precision + temp := image.NewGray16(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray16(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed 
temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + nearestGray16(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + nearestGray16(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + default: + // 16-bit precision + temp := image.NewRGBA64(image.Rect(0, 0, img.Bounds().Dy(), int(width))) + result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestGeneric(img, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + } + +} + +// Calculates scaling factors using old and new image dimensions. 
+func calcFactors(width, height uint, oldWidth, oldHeight float64) (scaleX, scaleY float64) { + if width == 0 { + if height == 0 { + scaleX = 1.0 + scaleY = 1.0 + } else { + scaleY = oldHeight / float64(height) + scaleX = scaleY + } + } else { + scaleX = oldWidth / float64(width) + if height == 0 { + scaleY = scaleX + } else { + scaleY = oldHeight / float64(height) + } + } + return +} + +type imageWithSubImage interface { + image.Image + SubImage(image.Rectangle) image.Image +} + +func makeSlice(img imageWithSubImage, i, n int) image.Image { + return img.SubImage(image.Rect(img.Bounds().Min.X, img.Bounds().Min.Y+i*img.Bounds().Dy()/n, img.Bounds().Max.X, img.Bounds().Min.Y+(i+1)*img.Bounds().Dy()/n)) +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/resize_test.go b/Godeps/_workspace/src/github.com/nfnt/resize/resize_test.go new file mode 100644 index 000000000..ee31ac494 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/resize_test.go @@ -0,0 +1,224 @@ +package resize + +import ( + "image" + "image/color" + "runtime" + "testing" +) + +var img = image.NewGray16(image.Rect(0, 0, 3, 3)) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) + img.Set(1, 1, color.White) +} + +func Test_Param1(t *testing.T) { + m := Resize(0, 0, img, NearestNeighbor) + if m.Bounds() != img.Bounds() { + t.Fail() + } +} + +func Test_Param2(t *testing.T) { + m := Resize(100, 0, img, NearestNeighbor) + if m.Bounds() != image.Rect(0, 0, 100, 100) { + t.Fail() + } +} + +func Test_ZeroImg(t *testing.T) { + zeroImg := image.NewGray16(image.Rect(0, 0, 0, 0)) + + m := Resize(0, 0, zeroImg, NearestNeighbor) + if m.Bounds() != zeroImg.Bounds() { + t.Fail() + } +} + +func Test_CorrectResize(t *testing.T) { + zeroImg := image.NewGray16(image.Rect(0, 0, 256, 256)) + + m := Resize(60, 0, zeroImg, NearestNeighbor) + if m.Bounds() != image.Rect(0, 0, 60, 60) { + t.Fail() + } +} + +func Test_SameColor(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 20, 20)) + for y := 
img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetRGBA(x, y, color.RGBA{0x80, 0x80, 0x80, 0xFF}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := img.At(x, y).(color.RGBA) + if color.R != 0x80 || color.G != 0x80 || color.B != 0x80 || color.A != 0xFF { + t.Fail() + } + } + } +} + +func Test_Bounds(t *testing.T) { + img := image.NewRGBA(image.Rect(20, 10, 200, 99)) + out := Resize(80, 80, img, Lanczos2) + out.At(0, 0) +} + +func Test_SameSizeReturnsOriginal(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 10, 10)) + out := Resize(0, 0, img, Lanczos2) + + if img != out { + t.Fail() + } + + out = Resize(10, 10, img, Lanczos2) + + if img != out { + t.Fail() + } +} + +func Test_PixelCoordinates(t *testing.T) { + checkers := image.NewGray(image.Rect(0, 0, 4, 4)) + checkers.Pix = []uint8{ + 255, 0, 255, 0, + 0, 255, 0, 255, + 255, 0, 255, 0, + 0, 255, 0, 255, + } + + resized := Resize(12, 12, checkers, NearestNeighbor).(*image.Gray) + + if resized.Pix[0] != 255 || resized.Pix[1] != 255 || resized.Pix[2] != 255 { + t.Fail() + } + + if resized.Pix[3] != 0 || resized.Pix[4] != 0 || resized.Pix[5] != 0 { + t.Fail() + } +} + +func Test_ResizeWithPremultipliedAlpha(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 1, 4)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + // 0x80 = 0.5 * 0xFF. + img.SetRGBA(0, y, color.RGBA{0x80, 0x80, 0x80, 0x80}) + } + + out := Resize(1, 2, img, MitchellNetravali) + + outputColor := out.At(0, 0).(color.NRGBA) + if outputColor.R != 0xFF { + t.Fail() + } +} + +const ( + // Use a small image size for benchmarks. We don't want memory performance + // to affect the benchmark results. 
+ benchMaxX = 250 + benchMaxY = 250 + + // Resize values near the original size require increase the amount of time + // resize spends converting the image. + benchWidth = 200 + benchHeight = 200 +) + +func benchRGBA(b *testing.B, interp InterpolationFunction) { + m := image.NewRGBA(image.Rect(0, 0, benchMaxX, benchMaxY)) + // Initialize m's pixels to create a non-uniform image. + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + i := m.PixOffset(x, y) + m.Pix[i+0] = uint8(y + 4*x) + m.Pix[i+1] = uint8(y + 4*x) + m.Pix[i+2] = uint8(y + 4*x) + m.Pix[i+3] = uint8(4*y + x) + } + } + + var out image.Image + b.ResetTimer() + for i := 0; i < b.N; i++ { + out = Resize(benchWidth, benchHeight, m, interp) + } + out.At(0, 0) +} + +// The names of some interpolation functions are truncated so that the columns +// of 'go test -bench' line up. +func Benchmark_Nearest_RGBA(b *testing.B) { + benchRGBA(b, NearestNeighbor) +} + +func Benchmark_Bilinear_RGBA(b *testing.B) { + benchRGBA(b, Bilinear) +} + +func Benchmark_Bicubic_RGBA(b *testing.B) { + benchRGBA(b, Bicubic) +} + +func Benchmark_Mitchell_RGBA(b *testing.B) { + benchRGBA(b, MitchellNetravali) +} + +func Benchmark_Lanczos2_RGBA(b *testing.B) { + benchRGBA(b, Lanczos2) +} + +func Benchmark_Lanczos3_RGBA(b *testing.B) { + benchRGBA(b, Lanczos3) +} + +func benchYCbCr(b *testing.B, interp InterpolationFunction) { + m := image.NewYCbCr(image.Rect(0, 0, benchMaxX, benchMaxY), image.YCbCrSubsampleRatio422) + // Initialize m's pixels to create a non-uniform image. 
+ for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + m.Y[yi] = uint8(16*y + x) + m.Cb[ci] = uint8(y + 16*x) + m.Cr[ci] = uint8(y + 16*x) + } + } + var out image.Image + b.ResetTimer() + for i := 0; i < b.N; i++ { + out = Resize(benchWidth, benchHeight, m, interp) + } + out.At(0, 0) +} + +func Benchmark_Nearest_YCC(b *testing.B) { + benchYCbCr(b, NearestNeighbor) +} + +func Benchmark_Bilinear_YCC(b *testing.B) { + benchYCbCr(b, Bilinear) +} + +func Benchmark_Bicubic_YCC(b *testing.B) { + benchYCbCr(b, Bicubic) +} + +func Benchmark_Mitchell_YCC(b *testing.B) { + benchYCbCr(b, MitchellNetravali) +} + +func Benchmark_Lanczos2_YCC(b *testing.B) { + benchYCbCr(b, Lanczos2) +} + +func Benchmark_Lanczos3_YCC(b *testing.B) { + benchYCbCr(b, Lanczos3) +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/thumbnail.go b/Godeps/_workspace/src/github.com/nfnt/resize/thumbnail.go new file mode 100644 index 000000000..9efc246be --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/thumbnail.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
+*/ + +package resize + +import ( + "image" +) + +// Thumbnail will downscale provided image to max width and height preserving +// original aspect ratio and using the interpolation function interp. +// It will return original image, without processing it, if original sizes +// are already smaller than provided constraints. +func Thumbnail(maxWidth, maxHeight uint, img image.Image, interp InterpolationFunction) image.Image { + origBounds := img.Bounds() + origWidth := uint(origBounds.Dx()) + origHeight := uint(origBounds.Dy()) + newWidth, newHeight := origWidth, origHeight + + // Return original image if it have same or smaller size as constraints + if maxWidth >= origWidth && maxHeight >= origHeight { + return img + } + + // Preserve aspect ratio + if origWidth > maxWidth { + newHeight = uint(origHeight * maxWidth / origWidth) + if newHeight < 1 { + newHeight = 1 + } + newWidth = maxWidth + } + + if newHeight > maxHeight { + newWidth = uint(newWidth * maxHeight / newHeight) + if newWidth < 1 { + newWidth = 1 + } + newHeight = maxHeight + } + return Resize(newWidth, newHeight, img, interp) +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/thumbnail_test.go b/Godeps/_workspace/src/github.com/nfnt/resize/thumbnail_test.go new file mode 100644 index 000000000..bd9875b2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/thumbnail_test.go @@ -0,0 +1,47 @@ +package resize + +import ( + "image" + "runtime" + "testing" +) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) +} + +var thumbnailTests = []struct { + origWidth int + origHeight int + maxWidth uint + maxHeight uint + expectedWidth uint + expectedHeight uint +}{ + {5, 5, 10, 10, 5, 5}, + {10, 10, 5, 5, 5, 5}, + {10, 50, 10, 10, 2, 10}, + {50, 10, 10, 10, 10, 2}, + {50, 100, 60, 90, 45, 90}, + {120, 100, 60, 90, 60, 50}, + {200, 250, 200, 150, 120, 150}, +} + +func TestThumbnail(t *testing.T) { + for i, tt := range thumbnailTests { + img := image.NewGray16(image.Rect(0, 0, 
tt.origWidth, tt.origHeight)) + + outImg := Thumbnail(tt.maxWidth, tt.maxHeight, img, NearestNeighbor) + + newWidth := uint(outImg.Bounds().Dx()) + newHeight := uint(outImg.Bounds().Dy()) + if newWidth != tt.expectedWidth || + newHeight != tt.expectedHeight { + t.Errorf("%d. Thumbnail(%v, %v, img, NearestNeighbor) => "+ + "width: %v, height: %v, want width: %v, height: %v", + i, tt.maxWidth, tt.maxHeight, + newWidth, newHeight, tt.expectedWidth, tt.expectedHeight, + ) + } + } +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/ycc.go b/Godeps/_workspace/src/github.com/nfnt/resize/ycc.go new file mode 100644 index 000000000..104159955 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/ycc.go @@ -0,0 +1,227 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import ( + "image" + "image/color" +) + +// ycc is an in memory YCbCr image. The Y, Cb and Cr samples are held in a +// single slice to increase resizing performance. +type ycc struct { + // Pix holds the image's pixels, in Y, Cb, Cr order. The pixel at + // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3]. + Pix []uint8 + // Stride is the Pix stride (in bytes) between vertically adjacent pixels. + Stride int + // Rect is the image's bounds. 
+ Rect image.Rectangle + // SubsampleRatio is the subsample ratio of the original YCbCr image. + SubsampleRatio image.YCbCrSubsampleRatio +} + +// PixOffset returns the index of the first element of Pix that corresponds to +// the pixel at (x, y). +func (p *ycc) PixOffset(x, y int) int { + return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3 +} + +func (p *ycc) Bounds() image.Rectangle { + return p.Rect +} + +func (p *ycc) ColorModel() color.Model { + return color.YCbCrModel +} + +func (p *ycc) At(x, y int) color.Color { + if !(image.Point{x, y}.In(p.Rect)) { + return color.YCbCr{} + } + i := p.PixOffset(x, y) + return color.YCbCr{ + p.Pix[i+0], + p.Pix[i+1], + p.Pix[i+2], + } +} + +func (p *ycc) Opaque() bool { + return true +} + +// SubImage returns an image representing the portion of the image p visible +// through r. The returned value shares pixels with the original image. +func (p *ycc) SubImage(r image.Rectangle) image.Image { + r = r.Intersect(p.Rect) + if r.Empty() { + return &ycc{SubsampleRatio: p.SubsampleRatio} + } + i := p.PixOffset(r.Min.X, r.Min.Y) + return &ycc{ + Pix: p.Pix[i:], + Stride: p.Stride, + Rect: r, + SubsampleRatio: p.SubsampleRatio, + } +} + +// newYCC returns a new ycc with the given bounds and subsample ratio. +func newYCC(r image.Rectangle, s image.YCbCrSubsampleRatio) *ycc { + w, h := r.Dx(), r.Dy() + buf := make([]uint8, 3*w*h) + return &ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: s} +} + +// YCbCr converts ycc to a YCbCr image with the same subsample ratio +// as the YCbCr image that ycc was generated from. 
+func (p *ycc) YCbCr() *image.YCbCr { + ycbcr := image.NewYCbCr(p.Rect, p.SubsampleRatio) + var off int + + switch ycbcr.SubsampleRatio { + case image.YCbCrSubsampleRatio422: + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y - ycbcr.Rect.Min.Y) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + case image.YCbCrSubsampleRatio420: + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y/2 - ycbcr.Rect.Min.Y/2) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + case image.YCbCrSubsampleRatio440: + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y/2 - ycbcr.Rect.Min.Y/2) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + default: + // Default to 4:4:4 subsampling. + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y - ycbcr.Rect.Min.Y) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + } + return ycbcr +} + +// imageYCbCrToYCC converts a YCbCr image to a ycc image for resizing. 
+func imageYCbCrToYCC(in *image.YCbCr) *ycc { + w, h := in.Rect.Dx(), in.Rect.Dy() + r := image.Rect(0, 0, w, h) + buf := make([]uint8, 3*w*h) + p := ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: in.SubsampleRatio} + var off int + + switch in.SubsampleRatio { + case image.YCbCrSubsampleRatio422: + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y - in.Rect.Min.Y) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + case image.YCbCrSubsampleRatio420: + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y/2 - in.Rect.Min.Y/2) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + case image.YCbCrSubsampleRatio440: + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y/2 - in.Rect.Min.Y/2) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + default: + // Default to 4:4:4 subsampling. 
+ for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y - in.Rect.Min.Y) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + } + return &p +} diff --git a/Godeps/_workspace/src/github.com/nfnt/resize/ycc_test.go b/Godeps/_workspace/src/github.com/nfnt/resize/ycc_test.go new file mode 100644 index 000000000..54d53d157 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nfnt/resize/ycc_test.go @@ -0,0 +1,214 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
+*/ + +package resize + +import ( + "image" + "image/color" + "testing" +) + +type Image interface { + image.Image + SubImage(image.Rectangle) image.Image +} + +func TestImage(t *testing.T) { + testImage := []Image{ + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio420), + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio422), + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio440), + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio444), + } + for _, m := range testImage { + if !image.Rect(0, 0, 10, 10).Eq(m.Bounds()) { + t.Errorf("%T: want bounds %v, got %v", + m, image.Rect(0, 0, 10, 10), m.Bounds()) + continue + } + m = m.SubImage(image.Rect(3, 2, 9, 8)).(Image) + if !image.Rect(3, 2, 9, 8).Eq(m.Bounds()) { + t.Errorf("%T: sub-image want bounds %v, got %v", + m, image.Rect(3, 2, 9, 8), m.Bounds()) + continue + } + // Test that taking an empty sub-image starting at a corner does not panic. + m.SubImage(image.Rect(0, 0, 0, 0)) + m.SubImage(image.Rect(10, 0, 10, 0)) + m.SubImage(image.Rect(0, 10, 0, 10)) + m.SubImage(image.Rect(10, 10, 10, 10)) + } +} + +func TestConvertYCbCr(t *testing.T) { + testImage := []Image{ + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio420), + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio422), + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio440), + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio444), + } + + for _, img := range testImage { + m := img.(*image.YCbCr) + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + m.Y[yi] = uint8(16*y + x) + m.Cb[ci] = uint8(y + 16*x) + m.Cr[ci] = uint8(y + 16*x) + } + } + + // test conversion from YCbCr to ycc + yc := imageYCbCrToYCC(m) + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + ystride := 3 * (m.Rect.Max.X - m.Rect.Min.X) + xstride := 
3 + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + si := (y * ystride) + (x * xstride) + if m.Y[yi] != yc.Pix[si] { + t.Errorf("Err Y - found: %d expected: %d x: %d y: %d yi: %d si: %d", + m.Y[yi], yc.Pix[si], x, y, yi, si) + } + if m.Cb[ci] != yc.Pix[si+1] { + t.Errorf("Err Cb - found: %d expected: %d x: %d y: %d ci: %d si: %d", + m.Cb[ci], yc.Pix[si+1], x, y, ci, si+1) + } + if m.Cr[ci] != yc.Pix[si+2] { + t.Errorf("Err Cr - found: %d expected: %d x: %d y: %d ci: %d si: %d", + m.Cr[ci], yc.Pix[si+2], x, y, ci, si+2) + } + } + } + + // test conversion from ycc back to YCbCr + ym := yc.YCbCr() + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + if m.Y[yi] != ym.Y[yi] { + t.Errorf("Err Y - found: %d expected: %d x: %d y: %d yi: %d", + m.Y[yi], ym.Y[yi], x, y, yi) + } + if m.Cb[ci] != ym.Cb[ci] { + t.Errorf("Err Cb - found: %d expected: %d x: %d y: %d ci: %d", + m.Cb[ci], ym.Cb[ci], x, y, ci) + } + if m.Cr[ci] != ym.Cr[ci] { + t.Errorf("Err Cr - found: %d expected: %d x: %d y: %d ci: %d", + m.Cr[ci], ym.Cr[ci], x, y, ci) + } + } + } + } +} + +func TestYCbCr(t *testing.T) { + rects := []image.Rectangle{ + image.Rect(0, 0, 16, 16), + image.Rect(1, 0, 16, 16), + image.Rect(0, 1, 16, 16), + image.Rect(1, 1, 16, 16), + image.Rect(1, 1, 15, 16), + image.Rect(1, 1, 16, 15), + image.Rect(1, 1, 15, 15), + image.Rect(2, 3, 14, 15), + image.Rect(7, 0, 7, 16), + image.Rect(0, 8, 16, 8), + image.Rect(0, 0, 10, 11), + image.Rect(5, 6, 16, 16), + image.Rect(7, 7, 8, 8), + image.Rect(7, 8, 8, 9), + image.Rect(8, 7, 9, 8), + image.Rect(8, 8, 9, 9), + image.Rect(7, 7, 17, 17), + image.Rect(8, 8, 17, 17), + image.Rect(9, 9, 17, 17), + image.Rect(10, 10, 17, 17), + } + subsampleRatios := []image.YCbCrSubsampleRatio{ + image.YCbCrSubsampleRatio444, + image.YCbCrSubsampleRatio422, + image.YCbCrSubsampleRatio420, + image.YCbCrSubsampleRatio440, + } + deltas := []image.Point{ + image.Pt(0, 0), + 
image.Pt(1000, 1001), + image.Pt(5001, -400), + image.Pt(-701, -801), + } + for _, r := range rects { + for _, subsampleRatio := range subsampleRatios { + for _, delta := range deltas { + testYCbCr(t, r, subsampleRatio, delta) + } + } + if testing.Short() { + break + } + } +} + +func testYCbCr(t *testing.T, r image.Rectangle, subsampleRatio image.YCbCrSubsampleRatio, delta image.Point) { + // Create a YCbCr image m, whose bounds are r translated by (delta.X, delta.Y). + r1 := r.Add(delta) + img := image.NewYCbCr(r1, subsampleRatio) + + // Initialize img's pixels. For 422 and 420 subsampling, some of the Cb and Cr elements + // will be set multiple times. That's OK. We just want to avoid a uniform image. + for y := r1.Min.Y; y < r1.Max.Y; y++ { + for x := r1.Min.X; x < r1.Max.X; x++ { + yi := img.YOffset(x, y) + ci := img.COffset(x, y) + img.Y[yi] = uint8(16*y + x) + img.Cb[ci] = uint8(y + 16*x) + img.Cr[ci] = uint8(y + 16*x) + } + } + + m := imageYCbCrToYCC(img) + + // Make various sub-images of m. + for y0 := delta.Y + 3; y0 < delta.Y+7; y0++ { + for y1 := delta.Y + 8; y1 < delta.Y+13; y1++ { + for x0 := delta.X + 3; x0 < delta.X+7; x0++ { + for x1 := delta.X + 8; x1 < delta.X+13; x1++ { + subRect := image.Rect(x0, y0, x1, y1) + sub := m.SubImage(subRect).(*ycc) + + // For each point in the sub-image's bounds, check that m.At(x, y) equals sub.At(x, y). 
+ for y := sub.Rect.Min.Y; y < sub.Rect.Max.Y; y++ { + for x := sub.Rect.Min.X; x < sub.Rect.Max.X; x++ { + color0 := m.At(x, y).(color.YCbCr) + color1 := sub.At(x, y).(color.YCbCr) + if color0 != color1 { + t.Errorf("r=%v, subsampleRatio=%v, delta=%v, x=%d, y=%d, color0=%v, color1=%v", + r, subsampleRatio, delta, x, y, color0, color1) + return + } + } + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md new file mode 100644 index 000000000..219994581 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md @@ -0,0 +1,23 @@ +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go new file mode 100644 index 000000000..721bcac79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go @@ -0,0 +1,179 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. 
+// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + // https://github.com/stretchr/objx/issues/12 + if strings.Contains(thisSel, "[") { + + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. 
Must use array[int].") + } + + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } else { + current = curMSI[thisSel] + } + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + + return current + +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + + return value +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go new file mode 100644 index 000000000..ce5d8e4aa --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go @@ -0,0 +1,145 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccessorsAccessGetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + assert.Equal(t, "Tyler", access(current, "name", nil, false, true)) + +} +func TestAccessorsAccessGetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessGetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessGetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true)) + + assert.Panics(t, func() { + access(current, "names[2]", nil, false, true) + }) + assert.Nil(t, access(current, "names[2]", nil, false, false)) + +} + +func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := 
access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsGet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + assert.Equal(t, "Tyler", current.Get("name").data) + +} + +func TestAccessorsAccessSetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + + access(current, "age", 29, true, true) + assert.Equal(t, current["age"], 29) + +} + +func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) { + + current := map[string]interface{}{} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + +} + +func TestAccessorsAccessSetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + + access(current, "name.first", "Mat", true, true) + access(current, "name.last", "Ryer", true, true) + + assert.Equal(t, "Mat", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessSetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + + access(current, "one.two.three.four", 5, true, true) + + assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessSetArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{"Tyler"}} + + access(current, "names[0]", "Mat", true, true) + + assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true)) + +} +func TestAccessorsAccessSetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": 
[]interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + + access(current, "names[0].first", "Mat", true, true) + access(current, "names[0].last", "Ryer", true, true) + access(current, "names[1].first", "Captain", true, true) + access(current, "names[1].last", "Underpants", true, true) + + assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true)) + +} + +func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsSet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + current.Set("name", "Mat") + assert.Equal(t, "Mat", current.Get("name").data) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt new file mode 100644 index 000000000..306023475 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt @@ -0,0 +1,14 @@ + case []{1}: + a := object.([]{1}) + if isSet { + a[index] = value.({1}) + } else { + if index >= len(a) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) + } + return nil + } else { + return 
a[index] + } + } diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html new file mode 100644 index 000000000..379ffc3c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html @@ -0,0 +1,86 @@ + + + + Codegen + + + + + +

+ Template +

+

+ Use {x} as a placeholder for each argument. +

+ + +

+ Arguments (comma separated) +

+

+ One block per line +

+ + +

+ Output +

+ + + + + + + + diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt new file mode 100644 index 000000000..b396900b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt @@ -0,0 +1,286 @@ +/* + {4} ({1} and []{1}) + -------------------------------------------------- +*/ + +// {4} gets the value as a {1}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) {4}(optionalDefault ...{1}) {1} { + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} +} + +// Must{4} gets the value as a {1}. +// +// Panics if the object is not a {1}. +func (v *Value) Must{4}() {1} { + return v.data.({1}) +} + +// {4}Slice gets the value as a []{1}, returns the optionalDefault +// value or nil if the value is not a []{1}. +func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// Must{4}Slice gets the value as a []{1}. +// +// Panics if the object is not a []{1}. +func (v *Value) Must{4}Slice() []{1} { + return v.data.([]{1}) +} + +// Is{4} gets whether the object contained is a {1} or not. +func (v *Value) Is{4}() bool { + _, ok := v.data.({1}) + return ok +} + +// Is{4}Slice gets whether the object contained is a []{1} or not. +func (v *Value) Is{4}Slice() bool { + _, ok := v.data.([]{1}) + return ok +} + +// Each{4} calls the specified callback for each object +// in the []{1}. +// +// Panics if the object is the wrong type. 
+func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { + + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// Where{4} uses the specified decider function to select items +// from the []{1}. The object contained in the result will contain +// only the selected items. +func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { + + var selected []{1} + + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data:selected} + +} + +// Group{4} uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]{1}. +func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { + + groups := make(map[string][]{1}) + + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data:groups} + +} + +// Replace{4} uses the specified function to replace each {1}s +// by iterating each item. The data in the returned result will be a +// []{1} containing the replaced items. +func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { + + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data:replaced} + +} + +// Collect{4} uses the specified collector function to collect a value +// for each of the {1}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { + + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data:collected} +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func Test{4}(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}()) + assert.Equal(t, val, New(m).Get("value").Must{4}()) + assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) + assert.Equal(t, val, New(m).Get("nothing").{4}({2})) + + assert.Panics(t, func() { + New(m).Get("age").Must{4}() + }) + +} + +func Test{4}Slice(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) + assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) + assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").Must{4}Slice() + }) + +} + +func TestIs{4}(t *testing.T) { + + var v *Value + + v = &Value{data: {1}({2})} + assert.True(t, v.Is{4}()) + + v = &Value{data: []{1}{ {1}({2}) }} + assert.True(t, v.Is{4}Slice()) + +} + +func TestEach{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) + 
assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) + +} + +func TestWhere{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + selected := v.Where{4}(func(i int, val {1}) bool { + return i%2==0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroup{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + grouped := v.Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).data.(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplace{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + rawArr := v.Must{4}Slice() + + replaced := v.Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.Must{4}Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollect{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + collected := v.Collect{4}(func(index int, val {1}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt 
b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt new file mode 100644 index 000000000..069d43d8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt @@ -0,0 +1,20 @@ +Interface,interface{},"something",nil,Inter +Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI +ObjxMap,(Map),New(1),New(nil),ObjxMap +Bool,bool,true,false,Bool +String,string,"hello","",Str +Int,int,1,0,Int +Int8,int8,1,0,Int8 +Int16,int16,1,0,Int16 +Int32,int32,1,0,Int32 +Int64,int64,1,0,Int64 +Uint,uint,1,0,Uint +Uint8,uint8,1,0,Uint8 +Uint16,uint16,1,0,Uint16 +Uint32,uint32,1,0,Uint32 +Uint64,uint64,1,0,Uint64 +Uintptr,uintptr,1,0,Uintptr +Float32,float32,1,0,Float32 +Float64,float64,1,0,Float64 +Complex64,complex64,1,0,Complex64 +Complex128,complex128,1,0,Complex128 diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go new file mode 100644 index 000000000..f9eb42a25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. 
+ SignatureSeparator = "_" +) diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go new file mode 100644 index 000000000..9cdfa9f9f --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go @@ -0,0 +1,117 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + + result, err := json.Marshal(m) + + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + + return string(result), err + +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + encoder.Write([]byte(jsonData)) + encoder.Close() + + return buf.String(), nil + +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + + return base64 + SignatureSeparator + sig, nil + +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + + vals := make(url.Values) + + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. 
This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go new file mode 100644 index 000000000..e9ccd2987 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go @@ -0,0 +1,94 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConversionJSON(t *testing.T) { + + jsonString := `{"name":"Mat"}` + o := MustFromJSON(jsonString) + + result, err := o.JSON() + + if assert.NoError(t, err) { + assert.Equal(t, jsonString, result) + } + + assert.Equal(t, jsonString, o.MustJSON()) + +} + +func TestConversionJSONWithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustJSON() + }) + + _, err := o.JSON() + + assert.Error(t, err) + +} + +func TestConversionBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.Base64() + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) + +} + +func TestConversionBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustBase64() + }) + + _, err := o.Base64() + + assert.Error(t, err) + +} + +func TestConversionSignedBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.SignedBase64("key") + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) + +} + +func TestConversionSignedBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + 
 o.MustSignedBase64("key") + }) + + _, err := o.SignedBase64("key") + + assert.Error(t, err) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go new file mode 100644 index 000000000..47bf85e46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go @@ -0,0 +1,72 @@ +// objx - Go package for dealing with maps, slices, JSON and other data. +// +// Overview +// +// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +// a powerful `Get` method (among others) that allows you to easily and quickly get +// access to data within the map, without having to worry too much about type assertions, +// missing data, default values etc. +// +// Pattern +// +// Objx uses a predictable pattern to make accessing data from within `map[string]interface{}'s +// easy. +// +// Call one of the `objx.` functions to create your `objx.Map` to get going: +// +// m, err := objx.FromJSON(json) +// +// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +// the rest will be optimistic and try to figure things out without panicking. +// +// Use `Get` to access the value you're interested in. You can use dot and array +// notation too: +// +// m.Get("places[0].latlng") +// +// Once you have sought the `Value` you're interested in, you can use the `Is*` methods +// to determine its type. +// +// if m.Get("code").IsStr() { /* ... */ } +// +// Or you can just assume the type, and use one of the strong type methods to +// extract the real value: +// +// m.Get("code").Int() +// +// If there's no value there (or if it's the wrong type) then a default value +// will be returned, or you can be explicit about the default value. +// +// Get("code").Int(-1) +// +// If you're dealing with a slice of data as a value, Objx provides many useful +// methods for iterating, manipulating and selecting that data. 
You can find out more +// by exploring the index below. +// +// Reading data +// +// A simple example of how to use Objx: +// +// // use MustFromJSON to make an objx.Map from some JSON +// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +// +// // get the details +// name := m.Get("name").Str() +// age := m.Get("age").Int() +// +// // get their nickname (or use their name if they +// // don't have one) +// nickname := m.Get("nickname").Str(name) +// +// Ranging +// +// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For +// example, to `range` the data, do what you would expect: +// +// m := objx.MustFromJSON(json) +// for key, value := range m { +// +// /* ... do your magic ... */ +// +// } +package objx diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go new file mode 100644 index 000000000..27f7d9049 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go @@ -0,0 +1,98 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +var fixtures = []struct { + // name is the name of the fixture (used for reporting + // failures) + name string + // data is the JSON data to be worked on + data string + // get is the argument(s) to pass to Get + get interface{} + // output is the expected output + output interface{} +}{ + { + name: "Simple get", + data: `{"name": "Mat"}`, + get: "name", + output: "Mat", + }, + { + name: "Get with dot notation", + data: `{"address": {"city": "Boulder"}}`, + get: "address.city", + output: "Boulder", + }, + { + name: "Deep get with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.two.three.four", + output: "hello", + }, + { + name: "Get missing with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.ten", + output: nil, + }, + { + name: "Get with array notation", + data: `{"tags": ["one", "two", 
"three"]}`, + get: "tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation", + data: `{"types": { "tags": ["one", "two", "three"]}}`, + get: "types.tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation - field after array", + data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`, + get: "tags[1].name", + output: "two", + }, + { + name: "Complex get with array and dot notation", + data: `{"tags": [{"list": [{"one":"pizza"}]}]}`, + get: "tags[0].list[0].one", + output: "pizza", + }, + { + name: "Get field from within string should be nil", + data: `{"name":"Tyler"}`, + get: "name.something", + output: nil, + }, + { + name: "Get field from within string (using array accessor) should be nil", + data: `{"numbers":["one", "two", "three"]}`, + get: "numbers[0].nope", + output: nil, + }, +} + +func TestFixtures(t *testing.T) { + + for _, fixture := range fixtures { + + m := MustFromJSON(fixture.data) + + // get the value + t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) + value := m.Get(fixture.get.(string)) + + // make sure it matches + assert.Equal(t, fixture.output, value.data, + "Get fixture \"%s\" failed: %v", fixture.name, fixture, + ) + + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map.go b/Godeps/_workspace/src/github.com/stretchr/objx/map.go new file mode 100644 index 000000000..eb6ed8e28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/map.go @@ -0,0 +1,222 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. 
+ MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil Map = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key arugment is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. 
" + keyString + " is not a valid key.") + } + + newMap[keyString] = value + + } + + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + + if err != nil { + return Nil, err + } + + return New(data), nil + +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + + result, err := FromBase64(base64String) + + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. 
+// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed.") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match.") + } + + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + + result, err := FromSignedBase64(base64String, key) + + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + + vals, err := url.ParseQuery(query) + + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. 
+// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + + o, err := FromURLQuery(query) + + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + + return o + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go new file mode 100644 index 000000000..6beb50675 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go @@ -0,0 +1,10 @@ +package objx + +var TestMap map[string]interface{} = map[string]interface{}{ + "name": "Tyler", + "address": map[string]interface{}{ + "city": "Salt Lake City", + "state": "UT", + }, + "numbers": []interface{}{"one", "two", "three", "four", "five"}, +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go new file mode 100644 index 000000000..1f8b45c61 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go @@ -0,0 +1,147 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +type Convertable struct { + name string +} + +func (c *Convertable) MSI() map[string]interface{} { + return map[string]interface{}{"name": c.name} +} + +type Unconvertable struct { + name string +} + +func TestMapCreation(t *testing.T) { + + o := New(nil) + assert.Nil(t, o) + + o = New("Tyler") + assert.Nil(t, o) + + unconvertable := &Unconvertable{name: "Tyler"} + o = New(unconvertable) + assert.Nil(t, o) + + convertable := &Convertable{name: "Tyler"} + o = New(convertable) + if assert.NotNil(t, convertable) { + assert.Equal(t, "Tyler", o["name"], "Tyler") + } + + o = MSI() + if assert.NotNil(t, o) { + assert.NotNil(t, o) + } + + o = MSI("name", "Tyler") + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, o["name"], "Tyler") + } + } + +} + +func TestMapMustFromJSONWithError(t *testing.T) { + + _, err := FromJSON(`"name":"Mat"}`) + 
assert.Error(t, err) + +} + +func TestMapFromJSON(t *testing.T) { + + o := MustFromJSON(`{"name":"Mat"}`) + + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, "Mat", o["name"]) + } + } + +} + +func TestMapFromJSONWithError(t *testing.T) { + + var m Map + + assert.Panics(t, func() { + m = MustFromJSON(`"name":"Mat"}`) + }) + + assert.Nil(t, m) + +} + +func TestMapFromBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=" + + o, err := FromBase64(base64String) + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat") + +} + +func TestMapFromBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWFasd0In0=" + + _, err := FromBase64(base64String) + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromBase64(base64String) + }) + +} + +func TestMapFromSignedBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + o, err := FromSignedBase64(base64String, "key") + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") + +} + +func TestMapFromSignedBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + _, err := FromSignedBase64(base64String, "key") + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromSignedBase64(base64String, "key") + }) + +} + +func TestMapFromURLQuery(t *testing.T) { + + m, err := FromURLQuery("name=tyler&state=UT") + if assert.NoError(t, err) && assert.NotNil(t, m) { + assert.Equal(t, "tyler", m.Get("name").Str()) + assert.Equal(t, "UT", m.Get("state").Str()) + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go new file mode 100644 index 
000000000..b35c86392 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go @@ -0,0 +1,81 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (d Map) Exclude(exclude []string) Map { + + excluded := make(Map) + for k, v := range d { + var shouldInclude bool = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// Merge blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + + for k, v := range merge { + m[k] = v + } + + return m + +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. 
+// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + + if newKey, ok := mapping[key]; ok { + return newKey, value + } + + return key, value + }) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go new file mode 100644 index 000000000..e20ee23bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go @@ -0,0 +1,77 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestExclude(t *testing.T) { + + d := make(Map) + d["name"] = "Mat" + d["age"] = 29 + d["secret"] = "ABC" + + excluded := d.Exclude([]string{"secret"}) + + assert.Equal(t, d["name"], excluded["name"]) + assert.Equal(t, d["age"], excluded["age"]) + assert.False(t, excluded.Has("secret"), "secret should be excluded") + +} + +func TestCopy(t *testing.T) { + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + d1Obj := New(d1) + d2Obj := d1Obj.Copy() + + d2Obj["name"] = "Mat" + + assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") + assert.Equal(t, d2Obj.Get("name").Str(), "Mat") + +} + +func TestMerge(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.Merge(d1Obj) + + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Empty(t, dObj.Get("location").Str()) + +} + +func TestMergeHere(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := 
dObj.MergeHere(d1Obj) + + assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security.go b/Godeps/_workspace/src/github.com/stretchr/objx/security.go new file mode 100644 index 000000000..fdd6be9cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/security.go @@ -0,0 +1,14 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. +func HashWithKey(data, key string) string { + hash := sha1.New() + hash.Write([]byte(data + ":" + key)) + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go new file mode 100644 index 000000000..8f0898f62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go @@ -0,0 +1,12 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHashWithKey(t *testing.T) { + + assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go new file mode 100644 index 000000000..5408c7fd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go @@ -0,0 +1,41 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSimpleExample(t *testing.T) { + + // build a map from a JSON object + o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the 
shire"}}`) + + // Map can be used as a straight map[string]interface{} + assert.Equal(t, o["name"], "Mat") + + // Get an Value object + v := o.Get("name") + assert.Equal(t, v, &Value{data: "Mat"}) + + // Test the contained value + assert.False(t, v.IsInt()) + assert.False(t, v.IsBool()) + assert.True(t, v.IsStr()) + + // Get the contained value + assert.Equal(t, v.Str(), "Mat") + + // Get a default value if the contained value is not of the expected type or does not exist + assert.Equal(t, 1, v.Int(1)) + + // Get a value by using array notation + assert.Equal(t, "indian", o.Get("foods[0]").Data()) + + // Set a value by using array notation + o.Set("foods[0]", "italian") + assert.Equal(t, "italian", o.Get("foods[0]").Str()) + + // Get a value by using dot notation + assert.Equal(t, "hobbiton", o.Get("location.county").Str()) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go new file mode 100644 index 000000000..d9e0b479a --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. 
+func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go new file mode 100644 index 000000000..bcc1eb03d --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go @@ -0,0 +1,24 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHas(t *testing.T) { + + m := New(TestMap) + + assert.True(t, m.Has("name")) + assert.True(t, m.Has("address.state")) + assert.True(t, m.Has("numbers[4]")) + + assert.False(t, m.Has("address.state.nope")) + assert.False(t, m.Has("address.nope")) + assert.False(t, m.Has("nope")) + assert.False(t, m.Has("numbers[5]")) + + m = nil + assert.False(t, m.Has("nothing")) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 000000000..f3ecb29b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2881 @@ +package objx + +/* + Inter (interface{} and []interface{}) + -------------------------------------------------- +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. 
+func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + + var selected []interface{} + + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. 
+func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + + groups := make(map[string][]interface{}) + + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) + -------------------------------------------------- +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. 
+// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + + var selected []map[string]interface{} + + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + + groups := make(map[string][]map[string]interface{}) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) + -------------------------------------------------- +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. 
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + + var selected [](Map) + + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + + groups := make(map[string][](Map)) + + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Bool (bool and []bool) + -------------------------------------------------- +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachBool(callback func(int, bool) bool) *Value { + + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + + var selected []bool + + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + + groups := make(map[string][]bool) + + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Str (string and []string) + -------------------------------------------------- +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachStr(callback func(int, string) bool) *Value { + + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + + var selected []string + + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + + groups := make(map[string][]string) + + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int (int and []int) + -------------------------------------------------- +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt uses the specified decider function to select items +// from the []int. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + + var selected []int + + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + + groups := make(map[string][]int) + + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) + -------------------------------------------------- +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + + var selected []int8 + + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + + groups := make(map[string][]int8) + + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) + -------------------------------------------------- +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + + var selected []int16 + + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + + groups := make(map[string][]int16) + + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) + -------------------------------------------------- +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + + var selected []int32 + + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + + groups := make(map[string][]int32) + + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) + -------------------------------------------------- +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + + var selected []int64 + + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + + groups := make(map[string][]int64) + + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint (uint and []uint) + -------------------------------------------------- +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint(callback func(int, uint) bool) *Value { + + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + + var selected []uint + + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + + groups := make(map[string][]uint) + + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) + -------------------------------------------------- +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + + var selected []uint8 + + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + + groups := make(map[string][]uint8) + + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) + -------------------------------------------------- +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + + var selected []uint16 + + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + + groups := make(map[string][]uint16) + + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) + -------------------------------------------------- +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + + var selected []uint32 + + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + + groups := make(map[string][]uint32) + + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) + -------------------------------------------------- +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. +func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + + var selected []uint64 + + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + + groups := make(map[string][]uint64) + + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) + -------------------------------------------------- +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + + var selected []uintptr + + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + + groups := make(map[string][]uintptr) + + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) + -------------------------------------------------- +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + + var selected []float32 + + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + + groups := make(map[string][]float32) + + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) + -------------------------------------------------- +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + + var selected []float64 + + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + + groups := make(map[string][]float64) + + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) + -------------------------------------------------- +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + + var selected []complex64 + + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. +func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + + groups := make(map[string][]complex64) + + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) + -------------------------------------------------- +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + + var selected []complex128 + + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + + groups := make(map[string][]complex128) + + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. 
The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go new file mode 100644 index 000000000..f7a4fceea --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go @@ -0,0 +1,2867 @@ +package objx + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInter(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Inter()) + assert.Equal(t, val, New(m).Get("value").MustInter()) + assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) + assert.Equal(t, val, New(m).Get("nothing").Inter("something")) + + assert.Panics(t, func() { + New(m).Get("age").MustInter() + }) + +} + +func TestInterSlice(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) + assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) + assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInterSlice() + }) + +} + +func TestIsInter(t *testing.T) { + + var v *Value + + v = &Value{data: 
interface{}("something")} + assert.True(t, v.IsInter()) + + v = &Value{data: []interface{}{interface{}("something")}} + assert.True(t, v.IsInterSlice()) + +} + +func TestEachInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + count := 0 + replacedVals := make([]interface{}, 0) + assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustInterSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) + +} + +func TestWhereInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + selected := v.WhereInter(func(i int, val interface{}) bool { + return i%2 == 0 + }).MustInterSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + grouped := v.GroupInter(func(i int, val interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + rawArr := v.MustInterSlice() + + replaced := 
v.ReplaceInter(func(index int, val interface{}) interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInterSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + collected := v.CollectInter(func(index int, val interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestMSI(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSI()) + assert.Equal(t, val, New(m).Get("value").MustMSI()) + assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) + assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) + + assert.Panics(t, func() { + New(m).Get("age").MustMSI() + }) + +} + +func TestMSISlice(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": 
[]map[string]interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) + assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) + assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustMSISlice() + }) + +} + +func TestIsMSI(t *testing.T) { + + var v *Value + + v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} + assert.True(t, v.IsMSI()) + + v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + assert.True(t, v.IsMSISlice()) + +} + +func TestEachMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + count := 0 + replacedVals := make([]map[string]interface{}, 0) + assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) + assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) + assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) + +} + +func TestWhereMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), 
map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { + return i%2 == 0 + }).MustMSISlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]map[string]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + rawArr := v.MustMSISlice() + + replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustMSISlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, 
replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestObjxMap(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMap()) + assert.Equal(t, val, New(m).Get("value").MustObjxMap()) + assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) + + assert.Panics(t, func() { + New(m).Get("age").MustObjxMap() + }) + +} + +func TestObjxMapSlice(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) + assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) + 
assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustObjxMapSlice() + }) + +} + +func TestIsObjxMap(t *testing.T) { + + var v *Value + + v = &Value{data: (Map)(New(1))} + assert.True(t, v.IsObjxMap()) + + v = &Value{data: [](Map){(Map)(New(1))}} + assert.True(t, v.IsObjxMapSlice()) + +} + +func TestEachObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + count := 0 + replacedVals := make([](Map), 0) + assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) + +} + +func TestWhereObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + selected := v.WhereObjxMap(func(i int, val Map) bool { + return i%2 == 0 + }).MustObjxMapSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + grouped := v.GroupObjxMap(func(i int, val Map) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][](Map)) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + rawArr := v.MustObjxMapSlice() + + replaced := v.ReplaceObjxMap(func(index int, val Map) Map { + if index < len(rawArr)-1 { + 
return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustObjxMapSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + collected := v.CollectObjxMap(func(index int, val Map) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestBool(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Bool()) + assert.Equal(t, val, New(m).Get("value").MustBool()) + assert.Equal(t, bool(false), New(m).Get("nothing").Bool()) + assert.Equal(t, val, New(m).Get("nothing").Bool(true)) + + assert.Panics(t, func() { + New(m).Get("age").MustBool() + }) + +} + +func TestBoolSlice(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": []bool{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) + assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) + assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) + + assert.Panics(t, func() { + 
New(m).Get("nothing").MustBoolSlice() + }) + +} + +func TestIsBool(t *testing.T) { + + var v *Value + + v = &Value{data: bool(true)} + assert.True(t, v.IsBool()) + + v = &Value{data: []bool{bool(true)}} + assert.True(t, v.IsBoolSlice()) + +} + +func TestEachBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} + count := 0 + replacedVals := make([]bool, 0) + assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) + +} + +func TestWhereBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + selected := v.WhereBool(func(i int, val bool) bool { + return i%2 == 0 + }).MustBoolSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + grouped := v.GroupBool(func(i int, val bool) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]bool) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + rawArr := v.MustBoolSlice() + + replaced := v.ReplaceBool(func(index int, val bool) bool { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustBoolSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + 
assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + collected := v.CollectBool(func(index int, val bool) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestStr(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Str()) + assert.Equal(t, val, New(m).Get("value").MustStr()) + assert.Equal(t, string(""), New(m).Get("nothing").Str()) + assert.Equal(t, val, New(m).Get("nothing").Str("hello")) + + assert.Panics(t, func() { + New(m).Get("age").MustStr() + }) + +} + +func TestStrSlice(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": []string{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) + assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) + assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustStrSlice() + }) + +} + +func TestIsStr(t *testing.T) { + + var v *Value + + v = &Value{data: string("hello")} + assert.True(t, v.IsStr()) + + v = &Value{data: []string{string("hello")}} + assert.True(t, v.IsStrSlice()) + +} + +func 
TestEachStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + count := 0 + replacedVals := make([]string, 0) + assert.Equal(t, v, v.EachStr(func(i int, val string) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustStrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) + +} + +func TestWhereStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + selected := v.WhereStr(func(i int, val string) bool { + return i%2 == 0 + }).MustStrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + grouped := v.GroupStr(func(i int, val string) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]string) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + rawArr := v.MustStrSlice() + + replaced := v.ReplaceStr(func(index int, val string) string { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustStrSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + 
assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + collected := v.CollectStr(func(index int, val string) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int()) + assert.Equal(t, val, New(m).Get("value").MustInt()) + assert.Equal(t, int(0), New(m).Get("nothing").Int()) + assert.Equal(t, val, New(m).Get("nothing").Int(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt() + }) + +} + +func TestIntSlice(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": []int{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) + assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) + assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustIntSlice() + }) + +} + +func TestIsInt(t *testing.T) { + + var v *Value + + v = &Value{data: int(1)} + assert.True(t, v.IsInt()) + + v = &Value{data: []int{int(1)}} + assert.True(t, v.IsIntSlice()) + +} + +func TestEachInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} + count := 0 + replacedVals := make([]int, 0) + assert.Equal(t, v, 
v.EachInt(func(i int, val int) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) + +} + +func TestWhereInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + selected := v.WhereInt(func(i int, val int) bool { + return i%2 == 0 + }).MustIntSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + grouped := v.GroupInt(func(i int, val int) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + rawArr := v.MustIntSlice() + + replaced := v.ReplaceInt(func(index int, val int) int { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustIntSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + collected := v.CollectInt(func(index int, val int) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, 
collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt8(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8()) + assert.Equal(t, val, New(m).Get("value").MustInt8()) + assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) + assert.Equal(t, val, New(m).Get("nothing").Int8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt8() + }) + +} + +func TestInt8Slice(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": []int8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) + assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt8Slice() + }) + +} + +func TestIsInt8(t *testing.T) { + + var v *Value + + v = &Value{data: int8(1)} + assert.True(t, v.IsInt8()) + + v = &Value{data: []int8{int8(1)}} + assert.True(t, v.IsInt8Slice()) + +} + +func TestEachInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} + count := 0 + replacedVals := make([]int8, 0) + assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) + +} + +func TestWhereInt8(t *testing.T) { + + v := &Value{data: 
[]int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + selected := v.WhereInt8(func(i int, val int8) bool { + return i%2 == 0 + }).MustInt8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + grouped := v.GroupInt8(func(i int, val int8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + rawArr := v.MustInt8Slice() + + replaced := v.ReplaceInt8(func(index int, val int8) int8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + collected := v.CollectInt8(func(index int, val int8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt16(t *testing.T) { + + val := int16(1) + m := 
map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16()) + assert.Equal(t, val, New(m).Get("value").MustInt16()) + assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) + assert.Equal(t, val, New(m).Get("nothing").Int16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt16() + }) + +} + +func TestInt16Slice(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": []int16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0]) + assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt16Slice() + }) + +} + +func TestIsInt16(t *testing.T) { + + var v *Value + + v = &Value{data: int16(1)} + assert.True(t, v.IsInt16()) + + v = &Value{data: []int16{int16(1)}} + assert.True(t, v.IsInt16Slice()) + +} + +func TestEachInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} + count := 0 + replacedVals := make([]int16, 0) + assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) + +} + +func TestWhereInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + selected := v.WhereInt16(func(i int, val int16) bool { + return i%2 == 0 + }).MustInt16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + 
grouped := v.GroupInt16(func(i int, val int16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + rawArr := v.MustInt16Slice() + + replaced := v.ReplaceInt16(func(index int, val int16) int16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + collected := v.CollectInt16(func(index int, val int16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt32(t *testing.T) { + + val := int32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32()) + assert.Equal(t, val, New(m).Get("value").MustInt32()) + assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) + assert.Equal(t, val, New(m).Get("nothing").Int32(1)) + + assert.Panics(t, func() { + 
New(m).Get("age").MustInt32() + }) + +} + +func TestInt32Slice(t *testing.T) { + + val := int32(1) + m := map[string]interface{}{"value": []int32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) + assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt32Slice() + }) + +} + +func TestIsInt32(t *testing.T) { + + var v *Value + + v = &Value{data: int32(1)} + assert.True(t, v.IsInt32()) + + v = &Value{data: []int32{int32(1)}} + assert.True(t, v.IsInt32Slice()) + +} + +func TestEachInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} + count := 0 + replacedVals := make([]int32, 0) + assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) + +} + +func TestWhereInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + selected := v.WhereInt32(func(i int, val int32) bool { + return i%2 == 0 + }).MustInt32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + grouped := v.GroupInt32(func(i int, val int32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt32(t *testing.T) { + + v := 
&Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + rawArr := v.MustInt32Slice() + + replaced := v.ReplaceInt32(func(index int, val int32) int32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + collected := v.CollectInt32(func(index int, val int32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt64(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64()) + assert.Equal(t, val, New(m).Get("value").MustInt64()) + assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) + assert.Equal(t, val, New(m).Get("nothing").Int64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt64() + }) + +} + +func TestInt64Slice(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": []int64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) + 
assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt64Slice() + }) + +} + +func TestIsInt64(t *testing.T) { + + var v *Value + + v = &Value{data: int64(1)} + assert.True(t, v.IsInt64()) + + v = &Value{data: []int64{int64(1)}} + assert.True(t, v.IsInt64Slice()) + +} + +func TestEachInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} + count := 0 + replacedVals := make([]int64, 0) + assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) + +} + +func TestWhereInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + selected := v.WhereInt64(func(i int, val int64) bool { + return i%2 == 0 + }).MustInt64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + grouped := v.GroupInt64(func(i int, val int64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + rawArr := v.MustInt64Slice() + + replaced := v.ReplaceInt64(func(index int, val int64) int64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := 
replaced.MustInt64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + collected := v.CollectInt64(func(index int, val int64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint()) + assert.Equal(t, val, New(m).Get("value").MustUint()) + assert.Equal(t, uint(0), New(m).Get("nothing").Uint()) + assert.Equal(t, val, New(m).Get("nothing").Uint(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint() + }) + +} + +func TestUintSlice(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": []uint{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").UintSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0]) + assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice()) + assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUintSlice() + }) + +} + +func TestIsUint(t *testing.T) { + + var v *Value + + v = &Value{data: uint(1)} + 
assert.True(t, v.IsUint()) + + v = &Value{data: []uint{uint(1)}} + assert.True(t, v.IsUintSlice()) + +} + +func TestEachUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} + count := 0 + replacedVals := make([]uint, 0) + assert.Equal(t, v, v.EachUint(func(i int, val uint) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUintSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustUintSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustUintSlice()[2]) + +} + +func TestWhereUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + selected := v.WhereUint(func(i int, val uint) bool { + return i%2 == 0 + }).MustUintSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + grouped := v.GroupUint(func(i int, val uint) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + rawArr := v.MustUintSlice() + + replaced := v.ReplaceUint(func(index int, val uint) uint { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUintSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint(t 
*testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + collected := v.CollectUint(func(index int, val uint) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint8(t *testing.T) { + + val := uint8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint8()) + assert.Equal(t, val, New(m).Get("value").MustUint8()) + assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) + assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint8() + }) + +} + +func TestUint8Slice(t *testing.T) { + + val := uint8(1) + m := map[string]interface{}{"value": []uint8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0]) + assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint8Slice() + }) + +} + +func TestIsUint8(t *testing.T) { + + var v *Value + + v = &Value{data: uint8(1)} + assert.True(t, v.IsUint8()) + + v = &Value{data: []uint8{uint8(1)}} + assert.True(t, v.IsUint8Slice()) + +} + +func TestEachUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + count := 0 + replacedVals := make([]uint8, 0) + assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool { + + count++ + replacedVals 
= append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2]) + +} + +func TestWhereUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + selected := v.WhereUint8(func(i int, val uint8) bool { + return i%2 == 0 + }).MustUint8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + grouped := v.GroupUint8(func(i int, val uint8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + rawArr := v.MustUint8Slice() + + replaced := v.ReplaceUint8(func(index int, val uint8) uint8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + collected := v.CollectUint8(func(index int, val uint8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, 
collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint16(t *testing.T) { + + val := uint16(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint16()) + assert.Equal(t, val, New(m).Get("value").MustUint16()) + assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16()) + assert.Equal(t, val, New(m).Get("nothing").Uint16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint16() + }) + +} + +func TestUint16Slice(t *testing.T) { + + val := uint16(1) + m := map[string]interface{}{"value": []uint16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0]) + assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint16Slice() + }) + +} + +func TestIsUint16(t *testing.T) { + + var v *Value + + v = &Value{data: uint16(1)} + assert.True(t, v.IsUint16()) + + v = &Value{data: []uint16{uint16(1)}} + assert.True(t, v.IsUint16Slice()) + +} + +func TestEachUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + count := 0 + replacedVals := make([]uint16, 0) + assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1]) + 
assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2]) + +} + +func TestWhereUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + selected := v.WhereUint16(func(i int, val uint16) bool { + return i%2 == 0 + }).MustUint16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + grouped := v.GroupUint16(func(i int, val uint16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + rawArr := v.MustUint16Slice() + + replaced := v.ReplaceUint16(func(index int, val uint16) uint16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + collected := v.CollectUint16(func(index int, val uint16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + 
+} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint32(t *testing.T) { + + val := uint32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint32()) + assert.Equal(t, val, New(m).Get("value").MustUint32()) + assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32()) + assert.Equal(t, val, New(m).Get("nothing").Uint32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint32() + }) + +} + +func TestUint32Slice(t *testing.T) { + + val := uint32(1) + m := map[string]interface{}{"value": []uint32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0]) + assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint32Slice() + }) + +} + +func TestIsUint32(t *testing.T) { + + var v *Value + + v = &Value{data: uint32(1)} + assert.True(t, v.IsUint32()) + + v = &Value{data: []uint32{uint32(1)}} + assert.True(t, v.IsUint32Slice()) + +} + +func TestEachUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + count := 0 + replacedVals := make([]uint32, 0) + assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2]) + +} + +func TestWhereUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + selected := 
v.WhereUint32(func(i int, val uint32) bool { + return i%2 == 0 + }).MustUint32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + grouped := v.GroupUint32(func(i int, val uint32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + rawArr := v.MustUint32Slice() + + replaced := v.ReplaceUint32(func(index int, val uint32) uint32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + collected := v.CollectUint32(func(index int, val uint32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint64(t *testing.T) { + + val := uint64(1) + m := 
map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint64()) + assert.Equal(t, val, New(m).Get("value").MustUint64()) + assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64()) + assert.Equal(t, val, New(m).Get("nothing").Uint64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint64() + }) + +} + +func TestUint64Slice(t *testing.T) { + + val := uint64(1) + m := map[string]interface{}{"value": []uint64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0]) + assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint64Slice() + }) + +} + +func TestIsUint64(t *testing.T) { + + var v *Value + + v = &Value{data: uint64(1)} + assert.True(t, v.IsUint64()) + + v = &Value{data: []uint64{uint64(1)}} + assert.True(t, v.IsUint64Slice()) + +} + +func TestEachUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + count := 0 + replacedVals := make([]uint64, 0) + assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2]) + +} + +func TestWhereUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + selected := v.WhereUint64(func(i int, val uint64) bool { + return i%2 == 0 + }).MustUint64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), 
uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + grouped := v.GroupUint64(func(i int, val uint64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + rawArr := v.MustUint64Slice() + + replaced := v.ReplaceUint64(func(index int, val uint64) uint64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + collected := v.CollectUint64(func(index int, val uint64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUintptr(t *testing.T) { + + val := uintptr(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uintptr()) + assert.Equal(t, val, New(m).Get("value").MustUintptr()) + assert.Equal(t, uintptr(0), 
New(m).Get("nothing").Uintptr()) + assert.Equal(t, val, New(m).Get("nothing").Uintptr(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUintptr() + }) + +} + +func TestUintptrSlice(t *testing.T) { + + val := uintptr(1) + m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0]) + assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice()) + assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUintptrSlice() + }) + +} + +func TestIsUintptr(t *testing.T) { + + var v *Value + + v = &Value{data: uintptr(1)} + assert.True(t, v.IsUintptr()) + + v = &Value{data: []uintptr{uintptr(1)}} + assert.True(t, v.IsUintptrSlice()) + +} + +func TestEachUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + count := 0 + replacedVals := make([]uintptr, 0) + assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2]) + +} + +func TestWhereUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + selected := v.WhereUintptr(func(i int, val uintptr) bool { + return i%2 == 0 + }).MustUintptrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + grouped := v.GroupUintptr(func(i int, val uintptr) string { + return fmt.Sprintf("%v", 
i%2 == 0) + }).data.(map[string][]uintptr) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + rawArr := v.MustUintptrSlice() + + replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUintptrSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + collected := v.CollectUintptr(func(index int, val uintptr) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestFloat32(t *testing.T) { + + val := float32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float32()) + assert.Equal(t, val, New(m).Get("value").MustFloat32()) + assert.Equal(t, float32(0), New(m).Get("nothing").Float32()) + assert.Equal(t, val, New(m).Get("nothing").Float32(1)) + + assert.Panics(t, func() { + 
New(m).Get("age").MustFloat32() + }) + +} + +func TestFloat32Slice(t *testing.T) { + + val := float32(1) + m := map[string]interface{}{"value": []float32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0]) + assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustFloat32Slice() + }) + +} + +func TestIsFloat32(t *testing.T) { + + var v *Value + + v = &Value{data: float32(1)} + assert.True(t, v.IsFloat32()) + + v = &Value{data: []float32{float32(1)}} + assert.True(t, v.IsFloat32Slice()) + +} + +func TestEachFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} + count := 0 + replacedVals := make([]float32, 0) + assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2]) + +} + +func TestWhereFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + selected := v.WhereFloat32(func(i int, val float32) bool { + return i%2 == 0 + }).MustFloat32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + grouped := v.GroupFloat32(func(i int, val float32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]float32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) 
+ assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + rawArr := v.MustFloat32Slice() + + replaced := v.ReplaceFloat32(func(index int, val float32) float32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustFloat32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + collected := v.CollectFloat32(func(index int, val float32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestFloat64(t *testing.T) { + + val := float64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float64()) + assert.Equal(t, val, New(m).Get("value").MustFloat64()) + assert.Equal(t, float64(0), New(m).Get("nothing").Float64()) + assert.Equal(t, val, New(m).Get("nothing").Float64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustFloat64() + }) + +} + +func TestFloat64Slice(t *testing.T) { + + val := float64(1) + m := map[string]interface{}{"value": 
[]float64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0]) + assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustFloat64Slice() + }) + +} + +func TestIsFloat64(t *testing.T) { + + var v *Value + + v = &Value{data: float64(1)} + assert.True(t, v.IsFloat64()) + + v = &Value{data: []float64{float64(1)}} + assert.True(t, v.IsFloat64Slice()) + +} + +func TestEachFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} + count := 0 + replacedVals := make([]float64, 0) + assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2]) + +} + +func TestWhereFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + selected := v.WhereFloat64(func(i int, val float64) bool { + return i%2 == 0 + }).MustFloat64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + grouped := v.GroupFloat64(func(i int, val float64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]float64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), 
float64(1), float64(1), float64(1), float64(1), float64(1)}} + + rawArr := v.MustFloat64Slice() + + replaced := v.ReplaceFloat64(func(index int, val float64) float64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustFloat64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + collected := v.CollectFloat64(func(index int, val float64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestComplex64(t *testing.T) { + + val := complex64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex64()) + assert.Equal(t, val, New(m).Get("value").MustComplex64()) + assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64()) + assert.Equal(t, val, New(m).Get("nothing").Complex64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustComplex64() + }) + +} + +func TestComplex64Slice(t *testing.T) { + + val := complex64(1) + m := map[string]interface{}{"value": []complex64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0]) + assert.Equal(t, 
val, New(m).Get("value").MustComplex64Slice()[0]) + assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustComplex64Slice() + }) + +} + +func TestIsComplex64(t *testing.T) { + + var v *Value + + v = &Value{data: complex64(1)} + assert.True(t, v.IsComplex64()) + + v = &Value{data: []complex64{complex64(1)}} + assert.True(t, v.IsComplex64Slice()) + +} + +func TestEachComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + count := 0 + replacedVals := make([]complex64, 0) + assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2]) + +} + +func TestWhereComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + selected := v.WhereComplex64(func(i int, val complex64) bool { + return i%2 == 0 + }).MustComplex64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + grouped := v.GroupComplex64(func(i int, val complex64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]complex64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), 
complex64(1), complex64(1), complex64(1), complex64(1)}} + + rawArr := v.MustComplex64Slice() + + replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustComplex64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + collected := v.CollectComplex64(func(index int, val complex64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestComplex128(t *testing.T) { + + val := complex128(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex128()) + assert.Equal(t, val, New(m).Get("value").MustComplex128()) + assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128()) + assert.Equal(t, val, New(m).Get("nothing").Complex128(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustComplex128() + }) + +} + +func TestComplex128Slice(t *testing.T) { + + val := complex128(1) + m := map[string]interface{}{"value": []complex128{val}, "nothing": nil} + assert.Equal(t, val, 
New(m).Get("value").Complex128Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0]) + assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice()) + assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustComplex128Slice() + }) + +} + +func TestIsComplex128(t *testing.T) { + + var v *Value + + v = &Value{data: complex128(1)} + assert.True(t, v.IsComplex128()) + + v = &Value{data: []complex128{complex128(1)}} + assert.True(t, v.IsComplex128Slice()) + +} + +func TestEachComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + count := 0 + replacedVals := make([]complex128, 0) + assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2]) + +} + +func TestWhereComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + selected := v.WhereComplex128(func(i int, val complex128) bool { + return i%2 == 0 + }).MustComplex128Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + grouped := v.GroupComplex128(func(i int, val complex128) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]complex128) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + 
+func TestReplaceComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + rawArr := v.MustComplex128Slice() + + replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustComplex128Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + collected := v.CollectComplex128(func(index int, val complex128) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value.go b/Godeps/_workspace/src/github.com/stretchr/objx/value.go new file mode 100644 index 000000000..7aaef06b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/value.go @@ -0,0 +1,13 @@ +package objx + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go new file mode 100644 index 000000000..0bc65d92c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go @@ -0,0 +1 @@ +package objx diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 000000000..818cd7b0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,853 @@ +package assert + +import ( + "bufio" + "bytes" + "fmt" + "math" + "reflect" + "regexp" + "runtime" + "strings" + "time" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + + if reflect.DeepEqual(expected, actual) { + return true + } + + return false + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. 
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+	if ObjectsAreEqual(expected, actual) {
+		return true
+	}
+
+	actualType := reflect.TypeOf(actual)
+	expectedValue := reflect.ValueOf(expected)
+	if expectedValue.Type().ConvertibleTo(actualType) {
+		// Attempt comparison after type conversion
+		if reflect.DeepEqual(actual, expectedValue.Convert(actualType).Interface()) {
+			return true
+		}
+	}
+
+	return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns a string containing the file and line number of the assert call
+// that failed.
+func CallerInfo() string {
+
+	file := ""
+	line := 0
+	ok := false
+
+	for i := 0; ; i++ {
+		_, file, line, ok = runtime.Caller(i)
+		if !ok {
+			return ""
+		}
+		parts := strings.Split(file, "/")
+		dir := parts[len(parts)-2]
+		file = parts[len(parts)-1]
+		if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+			break
+		}
+	}
+
+	return fmt.Sprintf("%s:%d", file, line)
+}
+
+// getWhitespaceString returns a string that is long enough to overwrite the default
+// output from the go testing framework.
+func getWhitespaceString() string {
+
+	_, file, line, ok := runtime.Caller(1)
+	if !ok {
+		return ""
+	}
+	parts := strings.Split(file, "/")
+	file = parts[len(parts)-1]
+
+	return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 || msgAndArgs == nil {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		return msgAndArgs[0].(string)
+	}
+	if len(msgAndArgs) > 1 {
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ } + return "" +} + +// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's +// test printing (see inner comment for specifics) +func indentMessageLines(message string, tabs int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + if i != 0 { + outBuf.WriteRune('\n') + } + for ii := 0; ii < tabs; ii++ { + outBuf.WriteRune('\t') + // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter + // by 1 prematurely. + if ii == 0 && i > 0 { + ii++ + } + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + + if len(message) > 0 { + t.Errorf("\r%s\r\tLocation:\t%s\n"+ + "\r\tError:%s\n"+ + "\r\tMessages:\t%s\n\r", + getWhitespaceString(), + CallerInfo(), + indentMessageLines(failureMessage, 2), + message) + } else { + t.Errorf("\r%s\r\tLocation:\t%s\n"+ + "\r\tError:%s\n\r", + getWhitespaceString(), + CallerInfo(), + indentMessageLines(failureMessage, 2)) + } + + return false +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...) + } + + return true + +} + +// IsType asserts that the specified objects are of the same type. 
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ + " != %#v (actual)", expected, actual), msgAndArgs...) + } + + return true + +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqualValues(expected, actual) { + return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ + " != %#v (actual)", expected, actual), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. 
+// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + success := true + + if object == nil { + success = false + } else { + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + success = false + } + } + + if !success { + Fail(t, "Expected not to be nil.", msgAndArgs...) + } + + return success +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +var zeros = []interface{}{ + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + float32(0), + float64(0), +} + +// isEmpty gets whether the specified object is considered empty or not. 
+func isEmpty(object interface{}) bool {
+
+	if object == nil {
+		return true
+	} else if object == "" {
+		return true
+	} else if object == false {
+		return true
+	}
+
+	for _, v := range zeros {
+		if object == v {
+			return true
+		}
+	}
+
+	objValue := reflect.ValueOf(object)
+
+	switch objValue.Kind() {
+	case reflect.Map:
+		fallthrough
+	case reflect.Slice, reflect.Chan:
+		{
+			return (objValue.Len() == 0)
+		}
+	case reflect.Ptr:
+		{
+			switch object.(type) {
+			case *time.Time:
+				return object.(*time.Time).IsZero()
+			default:
+				return false
+			}
+		}
+	}
+	return false
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	pass := isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+//   assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	pass := !isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// getLen tries to get the length of object.
+// return (false, 0) if impossible.
+func getLen(x interface{}) (ok bool, length int) {
+	v := reflect.ValueOf(x)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+	return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(t, mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+	ok, l := getLen(object)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != true {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != false {
+		return Fail(t, "Should be false", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, "Should not be equal", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement tries to loop over the list to check if the list includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string or list(array, slice...) contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string or list(array, slice...) does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) 
+ } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). 
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) 
+ } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +// min(|expected|, |actual|) * epsilon +func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 { + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + // invalid input + return 0 + } + + if af < 0 { + af = -af + } + if bf < 0 { + bf = -bf + } + var delta float64 + if af < bf { + delta = af * epsilon + } else { + delta = bf * epsilon + } + return delta +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + delta := calcEpsilonDelta(expected, actual, epsilon) + + return InDelta(t, expected, actual, delta, msgAndArgs...) 
+} + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if isNil(err) { + return true + } + + return Fail(t, fmt.Sprintf("No error is expected but got %v", err), msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + return NotNil(t, err, "An error is expected but got nil. %s", message) + +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + if !NotNil(t, theError, "An error is expected but got nil. %s", message) { + return false + } + s := "An error with value \"%s\" is expected but got \"%s\". %s" + return Equal(t, theError.Error(), errString, + s, errString, theError.Error(), message) +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) 
+ } + + return !match + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go new file mode 100644 index 000000000..d859c77b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go @@ -0,0 +1,791 @@ +package assert + +import ( + "errors" + "math" + "regexp" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +func TestObjectsAreEqual(t *testing.T) { + + if !ObjectsAreEqual("Hello World", "Hello World") { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123, 123) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123.5, 123.5) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(nil, nil) { + t.Error("objectsAreEqual should return true") + } + if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual('x', "x") { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual("x", 'x') { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0, 0.1) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0.1, 0) { + t.Error("objectsAreEqual should return false") + } + if 
ObjectsAreEqual(uint32(10), int32(10)) { + t.Error("objectsAreEqual should return false") + } + if !ObjectsAreEqualValues(uint32(10), int32(10)) { + t.Error("ObjectsAreEqualValues should return true") + } + +} + +func TestImplements(t *testing.T) { + + mockT := new(testing.T) + + if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } + +} + +func TestIsType(t *testing.T) { + + mockT := new(testing.T) + + if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqual(t *testing.T) { + + mockT := new(testing.T) + + if !Equal(mockT, "Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !Equal(mockT, 123, 123) { + t.Error("Equal should return true") + } + if !Equal(mockT, 123.5, 123.5) { + t.Error("Equal should return true") + } + if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !Equal(mockT, nil, nil) { + t.Error("Equal should return true") + } + if !Equal(mockT, int32(123), int32(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, uint64(123), uint64(123)) { + t.Error("Equal should return true") + } + +} + +func TestNotNil(t *testing.T) { + + mockT := 
new(testing.T) + + if !NotNil(mockT, new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if NotNil(mockT, nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNil(t *testing.T) { + + mockT := new(testing.T) + + if !Nil(mockT, nil) { + t.Error("Nil should return true: object is nil") + } + if Nil(mockT, new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrue(t *testing.T) { + + mockT := new(testing.T) + + if !True(mockT, true) { + t.Error("True should return true") + } + if True(mockT, false) { + t.Error("True should return false") + } + +} + +func TestFalse(t *testing.T) { + + mockT := new(testing.T) + + if !False(mockT, false) { + t.Error("False should return true") + } + if False(mockT, true) { + t.Error("False should return false") + } + +} + +func TestExactly(t *testing.T) { + + mockT := new(testing.T) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if Exactly(mockT, a, b) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, d) { + t.Error("Exactly should return false") + } + if !Exactly(mockT, a, c) { + t.Error("Exactly should return true") + } + + if Exactly(mockT, nil, a) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqual(t *testing.T) { + + mockT := new(testing.T) + + if !NotEqual(mockT, "Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123, 1234) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } + funcA := func() int 
{ return 23 } + funcB := func() int { return 42 } + if !NotEqual(mockT, funcA, funcB) { + t.Error("NotEqual should return true") + } + + if NotEqual(mockT, "Hello World", "Hello World") { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123, 123) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123.5, 123.5) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return false") + } +} + +type A struct { + Name, Value string +} + +func TestContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + complexList := []*A{ + {"b", "c"}, + {"d", "e"}, + {"g", "h"}, + {"j", "k"}, + } + + if !Contains(mockT, "Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if Contains(mockT, "Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !Contains(mockT, list, "Bar") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") + } + if Contains(mockT, list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + if !Contains(mockT, complexList, &A{"g", "h"}) { + t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } +} + +func TestNotContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + + if !NotContains(mockT, "Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if NotContains(mockT, "Hello World", "Hello") { + 
t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !NotContains(mockT, list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if NotContains(mockT, list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func Test_includeElement(t *testing.T) { + + list1 := []string{"Foo", "Bar"} + list2 := []int{1, 2} + + ok, found := includeElement("Hello World", "World") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Bar") + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 1) + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 2) + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo!") + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, 3) + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, "1") + True(t, ok) + False(t, found) + + ok, found = includeElement(1433, "1") + False(t, ok) + False(t, found) + +} + +func TestCondition(t *testing.T) { + mockT := new(testing.T) + + if !Condition(mockT, func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if Condition(mockT, func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanics(t 
*testing.T) { + + mockT := new(testing.T) + + if !NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + +} + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), "Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) 
+ +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + 
struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } + + cases = []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 4}, + {[...]int{1, 2, 3}, 2}, + {"ABC", 2}, + {map[int]int{1: 2, 2: 4, 3: 6}, 4}, + {ch, 2}, + + {[]int{}, 1}, + {map[int]int{}, 1}, + {make(chan int), 1}, + + {[]int(nil), 1}, + {map[int]int(nil), 1}, + {(chan int)(nil), 1}, + } + + for _, c := range cases { + False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func 
TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := time.Now() + b := a.Add(10 * time.Second) + + True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDelta(t *testing.T) { + mockT := new(testing.T) + + True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") + False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") + False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail") + False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), 
float64(1), 1}, + } + + for _, tc := range cases { + True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInDeltaSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InDeltaSlice(mockT, + []float64{1.001, 0.999}, + []float64{1, 1}, + 0.1), "{1.001, 0.009} is element-wise close to {1, 1} in delta=0.1") + + True(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 1), "{1, 2} is element-wise close to {0, 3} in delta=1") + + False(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1") + + False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestInEpsilon(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + } + + for _, tc := range cases { + True(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + } + + for _, tc := range cases { + False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + +} + +func TestInEpsilonSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in espilon=0.06") + + False(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in espilon=0.04") + + 
False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestRegexp(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, Regexp(mockT, tc.rx, tc.str)) + True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + False(t, NotRegexp(mockT, tc.rx, tc.str)) + False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + True(t, NotRegexp(mockT, tc.rx, tc.str)) + True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 000000000..f67810628 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,154 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
+// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. 
+// +// Here is an overview of the assert functions: +// +// assert.Equal(t, expected, actual [, message [, format-args]]) +// +// assert.EqualValues(t, expected, actual [, message [, format-args]]) +// +// assert.NotEqual(t, notExpected, actual [, message [, format-args]]) +// +// assert.True(t, actualBool [, message [, format-args]]) +// +// assert.False(t, actualBool [, message [, format-args]]) +// +// assert.Nil(t, actualObject [, message [, format-args]]) +// +// assert.NotNil(t, actualObject [, message [, format-args]]) +// +// assert.Empty(t, actualObject [, message [, format-args]]) +// +// assert.NotEmpty(t, actualObject [, message [, format-args]]) +// +// assert.Len(t, actualObject, expectedLength, [, message [, format-args]]) +// +// assert.Error(t, errorObject [, message [, format-args]]) +// +// assert.NoError(t, errorObject [, message [, format-args]]) +// +// assert.EqualError(t, theError, errString [, message [, format-args]]) +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject) [,message [, format-args]]) +// +// assert.IsType(t, expectedObject, actualObject [, message [, format-args]]) +// +// assert.Contains(t, stringOrSlice, substringOrElement [, message [, format-args]]) +// +// assert.NotContains(t, stringOrSlice, substringOrElement [, message [, format-args]]) +// +// assert.Panics(t, func(){ +// +// // call code that should panic +// +// } [, message [, format-args]]) +// +// assert.NotPanics(t, func(){ +// +// // call code that should not panic +// +// } [, message [, format-args]]) +// +// assert.WithinDuration(t, timeA, timeB, deltaTime, [, message [, format-args]]) +// +// assert.InDelta(t, numA, numB, delta, [, message [, format-args]]) +// +// assert.InEpsilon(t, numA, numB, epsilon, [, message [, format-args]]) +// +// assert package contains Assertions object. it has assertion methods. 
+// +// Here is an overview of the assert functions: +// assert.Equal(expected, actual [, message [, format-args]]) +// +// assert.EqualValues(expected, actual [, message [, format-args]]) +// +// assert.NotEqual(notExpected, actual [, message [, format-args]]) +// +// assert.True(actualBool [, message [, format-args]]) +// +// assert.False(actualBool [, message [, format-args]]) +// +// assert.Nil(actualObject [, message [, format-args]]) +// +// assert.NotNil(actualObject [, message [, format-args]]) +// +// assert.Empty(actualObject [, message [, format-args]]) +// +// assert.NotEmpty(actualObject [, message [, format-args]]) +// +// assert.Len(actualObject, expectedLength, [, message [, format-args]]) +// +// assert.Error(errorObject [, message [, format-args]]) +// +// assert.NoError(errorObject [, message [, format-args]]) +// +// assert.EqualError(theError, errString [, message [, format-args]]) +// +// assert.Implements((*MyInterface)(nil), new(MyObject) [,message [, format-args]]) +// +// assert.IsType(expectedObject, actualObject [, message [, format-args]]) +// +// assert.Contains(stringOrSlice, substringOrElement [, message [, format-args]]) +// +// assert.NotContains(stringOrSlice, substringOrElement [, message [, format-args]]) +// +// assert.Panics(func(){ +// +// // call code that should panic +// +// } [, message [, format-args]]) +// +// assert.NotPanics(func(){ +// +// // call code that should not panic +// +// } [, message [, format-args]]) +// +// assert.WithinDuration(timeA, timeB, deltaTime, [, message [, format-args]]) +// +// assert.InDelta(numA, numB, delta, [, message [, format-args]]) +// +// assert.InEpsilon(numA, numB, epsilon, [, message [, format-args]]) +package assert diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 000000000..ac9dc9d1d --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 000000000..d8d3f531e --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,265 @@ +package assert + +import "time" + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a +// slice with len == 0. +// +// assert.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or a +// slice with len == 0. +// +// if assert.NotEmpty(obj) { +// assert.Equal("two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + +// True asserts that the specified value is true. +// +// assert.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + +// False asserts that the specified value is true. +// +// assert.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// Contains asserts that the specified string contains the specified substring. +// +// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + +// NotContains asserts that the specified string does NOT contain the specified substring. +// +// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) 
+} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(err) { +// assert.Equal(actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool { + return NoError(a.t, theError, msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(err, "An error was expected") { +// assert.Equal(err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool { + return Error(a.t, theError, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(err, "An error was expected") { +// assert.Equal(err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go new file mode 100644 index 000000000..3df3f3917 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go @@ -0,0 +1,511 @@ +package assert + +import ( + "errors" + "regexp" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } +} + +func TestIsTypeWrapper(t 
*testing.T) { + assert := New(new(testing.T)) + + if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqualWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Equal("Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !assert.Equal(123, 123) { + t.Error("Equal should return true") + } + if !assert.Equal(123.5, 123.5) { + t.Error("Equal should return true") + } + if !assert.Equal([]byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !assert.Equal(nil, nil) { + t.Error("Equal should return true") + } +} + +func TestEqualValuesWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.EqualValues(uint32(10), int32(10)) { + t.Error("EqualValues should return true") + } +} + +func TestNotNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.NotNil(new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if assert.NotNil(nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Nil(nil) { + t.Error("Nil should return true: object is nil") + } + if assert.Nil(new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrueWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.True(true) { + t.Error("True should return true") + } + if assert.True(false) { + t.Error("True should return false") + } + +} + +func 
TestFalseWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.False(false) { + t.Error("False should return true") + } + if assert.False(true) { + t.Error("False should return false") + } + +} + +func TestExactlyWrapper(t *testing.T) { + assert := New(new(testing.T)) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if assert.Exactly(a, b) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !assert.Contains(list, "Foo") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if assert.Contains(list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t 
*testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !assert.NotContains(list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if assert.NotContains(list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func TestConditionWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Condition(func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if assert.Condition(func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") + + 
// now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some error"), + "EqualError should return false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is 
empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil does not have length") + assert.False(mockAssert.Len(0, 0), "int does not have length") + assert.False(mockAssert.Len(true, 0), "true does not have length") + assert.False(mockAssert.Len(false, 0), "false does not have length") + assert.False(mockAssert.Len('A', 0), "Rune does not have length") + assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDurationWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + a := time.Now() + b := a.Add(10 * time.Second) + + assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + assert.True(mockAssert.WithinDuration(b, a, 
10*time.Second), "A 10s difference is within a 10s time difference") + + assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDeltaWrapper(t *testing.T) { + assert := New(new(testing.T)) + + True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1") + False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInEpsilonWrapper(t *testing.T) { + assert := New(new(testing.T)) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + 
{-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + } + + for _, tc := range cases { + True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + } + + for _, tc := range cases { + False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } +} + +func TestRegexpWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, assert.Regexp(tc.rx, tc.str)) + True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + False(t, assert.NotRegexp(tc.rx, tc.str)) + False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + True(t, assert.NotRegexp(tc.rx, tc.str)) + True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 000000000..1246e58e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 
+1,157 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 +// if building a new request fails. +func httpCode(handler http.HandlerFunc, mode, url string, values url.Values) int { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + if err != nil { + return -1 + } + handler(w, req) + return w.Code +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { + code := httpCode(handler, mode, url, values) + if code == -1 { + return false + } + return code >= http.StatusOK && code <= http.StatusPartialContent +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { + code := httpCode(handler, mode, url, values) + if code == -1 { + return false + } + return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPError(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { + code := httpCode(handler, mode, url, values) + if code == -1 { + return false + } + return code >= http.StatusBadRequest +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, mode, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, mode, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, mode, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) + } + + return !contains +} + +// +// Assertions Wrappers +// + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, mode, url string, values url.Values) bool { + return HTTPSuccess(a.t, handler, mode, url, values) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, mode, url string, values url.Values) bool { + return HTTPRedirect(a.t, handler, mode, url, values) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, mode, url string, values url.Values) bool { + return HTTPError(a.t, handler, mode, url, values) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { + return HTTPBodyContains(a.t, handler, mode, url, values, str) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { + return HTTPBodyNotContains(a.t, handler, mode, url, values, str) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go new file mode 100644 index 000000000..684c2d5d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go @@ -0,0 +1,86 @@ +package assert + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +func httpOK(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func httpRedirect(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTemporaryRedirect) +} + +func httpError(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) +} + +func TestHTTPStatuses(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true) + assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false) + assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false) + + assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false) + assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true) + assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false) + + assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false) + 
assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false) + assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true) +} + +func TestHTTPStatusesWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) +} + +func httpHelloName(w http.ResponseWriter, r *http.Request) { + name := r.FormValue("name") + w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) +} + +func TestHttpBody(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) +} + +func TestHttpBodyWrappers(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + 
assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 000000000..dd385074b --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,43 @@ +// Provides a system by which it is possible to mock your objects and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. 
+// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 000000000..fa8747e29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,566 @@ +package mock + +import ( + "fmt" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. 
+ ReturnArguments Arguments + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Holds a channel that will be used to block the Return until it either + // recieves a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + Run func(Arguments) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top of this document. +type Mock struct { + + // The method name that is currently + // being referred to by the On method. + onMethodName string + + // An array of the arguments that are + // currently being referred to by the On method. + onMethodArguments Arguments + + // Represents the calls that are expected of + // an object. + ExpectedCalls []Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// On starts a description of an expectation of the specified method +// being called. 
+// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Mock { + m.onMethodName = methodName + m.onMethodArguments = arguments + + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + return m +} + +// Return finishes a description of an expectation of the method (and arguments) +// specified in the most recent On method call. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2) +func (m *Mock) Return(returnArguments ...interface{}) *Mock { + m.ExpectedCalls = append(m.ExpectedCalls, Call{m.onMethodName, m.onMethodArguments, returnArguments, 0, nil, nil}) + return m +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (m *Mock) Once() { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 1 +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (m *Mock) Twice() { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 2 +} + +// Times indicates that that the mock should only return the indicated number +// of times. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (m *Mock) Times(i int) { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = i +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. 
+// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (m *Mock) WaitUntil(w <-chan time.Time) *Mock { + m.ExpectedCalls[len(m.ExpectedCalls)-1].WaitFor = w + return m +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (m *Mock) After(d time.Duration) *Mock { + return m.WaitUntil(time.After(d)) +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method such as unmarshalers that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(function(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (m *Mock) Run(fn func(Arguments)) *Mock { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Run = fn + return m +} + +/* + Recording and responding to activity +*/ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + for i, call := range m.ExpectedCalls { + if call.Method == method && call.Repeatability > -1 { + + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + return i, &call + } + + } + } + return -1, nil +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { + + diffCount := 0 + var closestCall *Call = nil + + for _, call := range m.ExpectedCalls { + if call.Method == method { + + _, tempDiffCount := call.Arguments.Diff(arguments) + if tempDiffCount < diffCount || diffCount == 0 { + diffCount = tempDiffCount + closestCall = &call + } + + } + } + + if closestCall == nil { + return false, nil + } + + return true, closestCall +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string = "" + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %v", argIndex, 
arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceeded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + defer m.mutex.Unlock() + m.mutex.Lock() + + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + + found, call := m.findExpectedCall(functionName, arguments...) + + switch { + case found < 0: + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestFound, closestCall := m.findClosestCall(functionName, arguments...) + + if closestFound { + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true))) + } else { + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) + } + case call.Repeatability == 1: + call.Repeatability = -1 + m.ExpectedCalls[found] = *call + case call.Repeatability > 1: + call.Repeatability -= 1 + m.ExpectedCalls[found] = *call + } + + // add the call + m.Calls = append(m.Calls, Call{functionName, arguments, make([]interface{}, 0), 0, nil, nil}) + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } + + if call.Run != nil { + call.Run(arguments) + } + + return call.ReturnArguments + +} + +/* + Assertions +*/ + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + var success bool = true + for _, obj := range testObjects { + mockObj := obj.(Mock) + success = success && mockObj.AssertExpectations(t) + } + return success +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. 
+func (m *Mock) AssertExpectations(t TestingT) bool { + + var somethingMissing bool = false + var failedExpectations int = 0 + + // iterate through each expectation + for _, expectedCall := range m.ExpectedCalls { + switch { + case !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments): + somethingMissing = true + failedExpectations++ + t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + case expectedCall.Repeatability > 0: + somethingMissing = true + failedExpectations++ + default: + t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(m.ExpectedCalls)-failedExpectations, len(m.ExpectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + var actualCalls int = 0 + for _, call := range m.Calls { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, actualCalls, expectedCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { + t.Logf("%v", m.ExpectedCalls) + return false + } + return true +} + +// AssertNotCalled asserts that the method was not called. 
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { + t.Logf("%v", m.ExpectedCalls) + return false + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.Calls { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // The "any" argument. Used in Diff and Assert when + // the argument being tested shouldn't be taken into consideration. + Anything string = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. 
+func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. +func (args Arguments) Diff(objects []interface{}) (string, int) { + + var output string = "\n" + var differences int + + var maxArgCount int = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + + if len(objects) <= i { + actual = "(Missing)" + } else { + actual = objects[i] + } + + if len(args) <= i { + expected = "(Missing)" + } else { + expected = args[i] + } + + if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. 
+func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index int = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. 
+func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go new file mode 100644 index 000000000..b7446accb --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go @@ -0,0 +1,843 @@ +package mock + +import ( + "errors" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +/* + Test objects +*/ + +// ExampleInterface represents an example interface. 
+type ExampleInterface interface { + TheExampleMethod(a, b, c int) (int, error) +} + +// TestExampleImplementation is a test implementation of ExampleInterface +type TestExampleImplementation struct { + Mock +} + +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { + args := i.Called(a, b, c) + return args.Int(0), errors.New("Whoops") +} + +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { + i.Called(yesorno) +} + +type ExampleType struct { + ran bool +} + +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { + args := i.Called(et) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error { + args := i.Called(fn) + return args.Error(0) +} + +type ExampleFuncType func(string) error + +func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error { + args := i.Called(fn) + return args.Error(0) +} + +/* + Mock +*/ + +func Test_Mock_TestData(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + if assert.NotNil(t, mockedService.TestData()) { + + mockedService.TestData().Set("something", 123) + assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) + + } + +} + +func Test_Mock_On(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.On("TheExampleMethod"), &mockedService.Mock) + assert.Equal(t, "TheExampleMethod", mockedService.onMethodName) + +} + +func Test_Mock_On_WithArgs(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.On("TheExampleMethod", 1, 2, 3), &mockedService.Mock) + assert.Equal(t, "TheExampleMethod", mockedService.onMethodName) + assert.Equal(t, 1, mockedService.onMethodArguments[0]) + assert.Equal(t, 2, 
mockedService.onMethodArguments[1]) + assert.Equal(t, 3, mockedService.onMethodArguments[2]) + +} + +func Test_Mock_On_WithFuncArg(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.On("TheExampleMethodFunc", AnythingOfType("func(string) error")).Return(nil), &mockedService.Mock) + assert.Equal(t, "TheExampleMethodFunc", mockedService.onMethodName) + assert.Equal(t, AnythingOfType("func(string) error"), mockedService.onMethodArguments[0]) + + fn := func(string) error { return nil } + mockedService.TheExampleMethodFunc(fn) + +} + +func Test_Mock_On_WithFuncPanics(t *testing.T) { + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Panics(t, func() { + mockedService.On("TheExampleMethodFunc", func(string) error { return nil }) + }) +} + +func Test_Mock_On_WithFuncTypeArg(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.On("TheExampleMethodFuncType", AnythingOfType("mock.ExampleFuncType")).Return(nil), &mockedService.Mock) + assert.Equal(t, "TheExampleMethodFuncType", mockedService.onMethodName) + assert.Equal(t, AnythingOfType("mock.ExampleFuncType"), mockedService.onMethodArguments[0]) + + fn := func(string) error { return nil } + mockedService.TheExampleMethodFuncType(fn) + +} + +func Test_Mock_Return(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + 
assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Nil(t, call.WaitFor) + + } + +} + +func Test_Mock_Return_WaitUntil(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + ch := time.After(time.Second) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).WaitUntil(ch), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Equal(t, ch, call.WaitFor) + + } + +} + +func Test_Mock_Return_After(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).After(time.Second), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, 
call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + + } + +} + +func Test_Mock_Return_Run(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Run(func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + }), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) + + } + + et := ExampleType{} + assert.Equal(t, false, et.ran) + mockedService.TheExampleMethod3(&et) + assert.Equal(t, true, et.ran) + +} + +func Test_Mock_Return_Once(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Once() + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 1, call.Repeatability) + assert.Nil(t, call.WaitFor) + + } + +} + +func Test_Mock_Return_Twice(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + 
mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Twice() + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 2, call.Repeatability) + assert.Nil(t, call.WaitFor) + + } + +} + +func Test_Mock_Return_Times(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Times(5) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 5, call.Repeatability) + assert.Nil(t, call.WaitFor) + + } + +} + +func Test_Mock_Return_Nothing(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.On("TheExampleMethod", "A", "B", true).Return(), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.ExpectedCalls)) { + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + 
assert.Equal(t, 0, len(call.ReturnArguments)) + + } + +} + +func Test_Mock_findExpectedCall(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, _ := m.findExpectedCall("Two") + + assert.Equal(t, -1, f) + +} + +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two").Once() + m.On("Two", 3).Return("three").Twice() + m.On("Two", 3).Return("three").Times(8) + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_callString(t *testing.T) { + + assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) + +} + +func Test_Mock_Called(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) + + returnArguments := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 1, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + 
assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func asyncCall(m *Mock, ch chan Arguments) { + ch <- m.Called(1, 2, 3) +} + +func Test_Mock_Called_blocks(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("asyncCall", 1, 2, 3).Return(5, "6", true).After(2 * time.Millisecond) + + ch := make(chan Arguments) + + go asyncCall(&mockedService.Mock, ch) + + select { + case <-ch: + t.Fatal("should have waited") + case <-time.After(1 * time.Millisecond): + } + + returnArguments := <-ch + + if assert.Equal(t, 1, len(mockedService.Mock.Calls)) { + assert.Equal(t, "asyncCall", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(5, "6", true).Once() + mockedService.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(-1, "hi", false) + + returnArguments1 := mockedService.Called(1, 2, 3) + returnArguments2 := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 2, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method) + 
assert.Equal(t, 1, mockedService.Calls[1].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[1].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[1].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments1)) { + assert.Equal(t, 5, returnArguments1[0]) + assert.Equal(t, "6", returnArguments1[1]) + assert.Equal(t, true, returnArguments1[2]) + } + + if assert.Equal(t, 3, len(returnArguments2)) { + assert.Equal(t, -1, returnArguments2[0]) + assert.Equal(t, "hi", returnArguments2[1]) + assert.Equal(t, false, returnArguments2[2]) + } + +} + +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) + + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) + +} + +func Test_Mock_Called_Unexpected(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + // make sure it panics if no expectation was made + assert.Panics(t, func() { + mockedService.Called(1, 2, 3) + }, "Calling unexpected method should panic") + +} + +func Test_AssertExpectationsForObjects_Helper(t *testing.T) { + + var mockedService1 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService2 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService3 *TestExampleImplementation = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return() + + mockedService1.Called(1) + mockedService2.Called(2) + mockedService3.Called(3) + + assert.True(t, 
AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + +} + +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { + + var mockedService1 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService2 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService3 *TestExampleImplementation = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() + + mockedService1.Called(1) + mockedService3.Called(3) + + tt := new(testing.T) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + +} + +func Test_Mock_AssertExpectations(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectationsCustomType(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.TheExampleMethod3(&ExampleType{}) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + 
mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + assert.False(t, mockedService.AssertExpectations(tt)) + + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) + + args1 := mockedService.Called(1, 2, 3) + assert.Equal(t, 5, args1.Int(0)) + assert.Equal(t, 6, args1.Int(1)) + assert.Equal(t, 7, args1.Int(2)) + + args2 := mockedService.Called(4, 5, 6) + assert.Equal(t, 5, args2.Int(0)) + assert.Equal(t, 6, args2.Int(1)) + assert.Equal(t, 7, args2.Int(2)) + +} + +func Test_Mock_AssertNumberOfCalls(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) + +} + +func Test_Mock_AssertCalled(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) + +} + +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) 
+ + mockedService.On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything).Return() + + mockedService.Called(1, "two", []uint8("three")) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) + +} + +func Test_Mock_AssertCalled_WithArguments(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) + +} + +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() + + mockedService.Called(1, 2, 3) + mockedService.Called(2, 3, 4) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) + +} + +func Test_Mock_AssertNotCalled(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) + +} + +/* 
+ Arguments helper methods +*/ +func Test_Arguments_Get(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + + assert.Equal(t, "string", args.Get(0).(string)) + assert.Equal(t, 123, args.Get(1).(int)) + assert.Equal(t, true, args.Get(2).(bool)) + +} + +func Test_Arguments_Is(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + + assert.True(t, args.Is("string", 123, true)) + assert.False(t, args.Is("wrong", 456, false)) + +} + +func Test_Arguments_Diff(t *testing.T) { + + var args Arguments = []interface{}{"Hello World", 123, true} + var diff string + var count int + diff, count = args.Diff([]interface{}{"Hello World", 456, "false"}) + + assert.Equal(t, 2, count) + assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) + assert.Contains(t, diff, `false != %!s(bool=true)`) + +} + +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + var diff string + var count int + diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"}) + + assert.Equal(t, 3, count) + assert.Contains(t, diff, `extra != (Missing)`) + +} + +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + var count int + _, count = args.Diff([]interface{}{"string", Anything, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { + + var args Arguments = []interface{}{"string", Anything, true} + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { + + var args Arguments = []interface{}{"string", AnythingOfType("int"), true} + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { + + var 
args Arguments = []interface{}{"string", AnythingOfType("string"), true} + var count int + var diff string + diff, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 1, count) + assert.Contains(t, diff, `string != type int - %!s(int=123)`) + +} + +func Test_Arguments_Assert(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + + assert.True(t, args.Assert(t, "string", 123, true)) + +} + +func Test_Arguments_String_Representation(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, `string,int,bool`, args.String()) + +} + +func Test_Arguments_String(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, "string", args.String(0)) + +} + +func Test_Arguments_Error(t *testing.T) { + + var err error = errors.New("An Error") + var args Arguments = []interface{}{"string", 123, true, err} + assert.Equal(t, err, args.Error(3)) + +} + +func Test_Arguments_Error_Nil(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true, nil} + assert.Equal(t, nil, args.Error(3)) + +} + +func Test_Arguments_Int(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, 123, args.Int(1)) + +} + +func Test_Arguments_Bool(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, true, args.Bool(2)) + +} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE new file mode 100644 index 000000000..968b45384 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE @@ -0,0 +1,14 @@ +Copyright (c) 2013 Vaughan Newton + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go new file mode 100644 index 000000000..81aeb32f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go @@ -0,0 +1,123 @@ +// Package ini provides functions for parsing INI configuration files. +package ini + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" +) + +var ( + sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) +) + +// ErrSyntax is returned when there is a syntax error in an INI file. +type ErrSyntax struct { + Line int + Source string // The contents of the erroneous line, without leading or trailing whitespace +} + +func (e ErrSyntax) Error() string { + return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) +} + +// A File represents a parsed INI file. +type File map[string]Section + +// A Section represents a single section of an INI file. +type Section map[string]string + +// Returns a named Section. A Section will be created if one does not already exist for the given name. 
+func (f File) Section(name string) Section { + section := f[name] + if section == nil { + section = make(Section) + f[name] = section + } + return section +} + +// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. +func (f File) Get(section, key string) (value string, ok bool) { + if s := f[section]; s != nil { + value, ok = s[key] + } + return +} + +// Loads INI data from a reader and stores the data in the File. +func (f File) Load(in io.Reader) (err error) { + bufin, ok := in.(*bufio.Reader) + if !ok { + bufin = bufio.NewReader(in) + } + return parseFile(bufin, f) +} + +// Loads INI data from a named file and stores the data in the File. +func (f File) LoadFile(file string) (err error) { + in, err := os.Open(file) + if err != nil { + return + } + defer in.Close() + return f.Load(in) +} + +func parseFile(in *bufio.Reader, file File) (err error) { + section := "" + lineNum := 0 + for done := false; !done; { + var line string + if line, err = in.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + return + } + } + lineNum++ + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val := groups[1], groups[2] + key, val = strings.TrimSpace(key), strings.TrimSpace(val) + file.Section(section)[key] = val + } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { + name := strings.TrimSpace(groups[1]) + section = name + // Create the section if it does not exist + file.Section(section) + } else { + return ErrSyntax{lineNum, line} + } + + } + return nil +} + +// Loads and returns a File from a reader. +func Load(in io.Reader) (File, error) { + file := make(File) + err := file.Load(in) + return file, err +} + +// Loads and returns an INI File from a file on disk. 
+func LoadFile(filename string) (File, error) { + file := make(File) + err := file.LoadFile(filename) + return file, err +} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go new file mode 100644 index 000000000..38a6f0004 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go @@ -0,0 +1,43 @@ +package ini + +import ( + "reflect" + "syscall" + "testing" +) + +func TestLoadFile(t *testing.T) { + originalOpenFiles := numFilesOpen(t) + + file, err := LoadFile("test.ini") + if err != nil { + t.Fatal(err) + } + + if originalOpenFiles != numFilesOpen(t) { + t.Error("test.ini not closed") + } + + if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) { + t.Error("file not read correctly") + } +} + +func numFilesOpen(t *testing.T) (num uint64) { + var rlimit syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatal(err) + } + maxFds := int(rlimit.Cur) + + var stat syscall.Stat_t + for i := 0; i < maxFds; i++ { + if syscall.Fstat(i, &stat) == nil { + num++ + } else { + return + } + } + return +} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go new file mode 100644 index 000000000..06a4d05ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go @@ -0,0 +1,89 @@ +package ini + +import ( + "reflect" + "strings" + "testing" +) + +func TestLoad(t *testing.T) { + src := ` + # Comments are ignored + + herp = derp + + [foo] + hello=world + whitespace should = not matter + ; sneaky semicolon-style comment + multiple = equals = signs + + [bar] + this = that` + + file, err := Load(strings.NewReader(src)) + if err != nil { + t.Fatal(err) + } + check := func(section, key, expect string) { + if value, _ := file.Get(section, key); value != expect { + t.Errorf("Get(%q, %q): expected %q, got %q", 
section, key, expect, value) + } + } + + check("", "herp", "derp") + check("foo", "hello", "world") + check("foo", "whitespace should", "not matter") + check("foo", "multiple", "equals = signs") + check("bar", "this", "that") +} + +func TestSyntaxError(t *testing.T) { + src := ` + # Line 2 + [foo] + bar = baz + # Here's an error on line 6: + wut? + herp = derp` + _, err := Load(strings.NewReader(src)) + t.Logf("%T: %v", err, err) + if err == nil { + t.Fatal("expected an error, got nil") + } + syntaxErr, ok := err.(ErrSyntax) + if !ok { + t.Fatal("expected an error of type ErrSyntax") + } + if syntaxErr.Line != 6 { + t.Fatal("incorrect line number") + } + if syntaxErr.Source != "wut?" { + t.Fatal("incorrect source") + } +} + +func TestDefinedSectionBehaviour(t *testing.T) { + check := func(src string, expect File) { + file, err := Load(strings.NewReader(src)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(file, expect) { + t.Errorf("expected %v, got %v", expect, file) + } + } + // No sections for an empty file + check("", File{}) + // Default section only if there are actually values for it + check("foo=bar", File{"": {"foo": "bar"}}) + // User-defined sections should always be present, even if empty + check("[a]\n[b]\nfoo=bar", File{ + "a": {}, + "b": {"foo": "bar"}, + }) + check("foo=bar\n[a]\nthis=that", File{ + "": {"foo": "bar"}, + "a": {"this": "that"}, + }) +} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini b/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini new file mode 100644 index 000000000..d13c999e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini @@ -0,0 +1,2 @@ +[default] +stuff = things diff --git a/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 000000000..9d80f1952 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 
2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. 
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + 
c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ 
c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go new file mode 100644 index 000000000..7afa1fdf3 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go @@ -0,0 +1,274 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +import "testing" + +type CryptTest struct { + key []byte + in []byte + out []byte +} + +// Test vector values are from http://www.schneier.com/code/vectors.txt. 
+var encryptTests = []CryptTest{ + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}}, + { + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}}, + { + []byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + []byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}}, + { + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}}, + + { + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}}, + { + []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}}, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}}, + { + []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}}, + { + []byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57}, + []byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42}, + []byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}}, + { + []byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E}, + []byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA}, + []byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}}, + { + []byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86}, + []byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72}, + []byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 
0xA4}}, + { + []byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E}, + []byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A}, + []byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}}, + { + []byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6}, + []byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2}, + []byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}}, + { + []byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE}, + []byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A}, + []byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}}, + { + []byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6}, + []byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2}, + []byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}}, + { + []byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE}, + []byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A}, + []byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}}, + { + []byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16}, + []byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02}, + []byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}}, + { + []byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F}, + []byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A}, + []byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}}, + { + []byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46}, + []byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32}, + []byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}}, + { + []byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E}, + []byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA}, + []byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}}, + { + []byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76}, + []byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62}, + []byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}}, + { + []byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07}, + []byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2}, + []byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}}, + { + []byte{0x49, 
0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F}, + []byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA}, + []byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}}, + { + []byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7}, + []byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92}, + []byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}}, + { + []byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF}, + []byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A}, + []byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}}, + { + []byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6}, + []byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2}, + []byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}}, + { + []byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF}, + []byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A}, + []byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}}, + { + []byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}}, + { + []byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}}, + { + []byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE}, + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}}, + { + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}}, + { + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}}, + { + []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}}, + { + []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 
0x32, 0x10}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + []byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}}, +} + +func TestCipherEncrypt(t *testing.T) { + for i, tt := range encryptTests { + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err) + continue + } + ct := make([]byte, len(tt.out)) + c.Encrypt(ct, tt.in) + for j, v := range ct { + if v != tt.out[j] { + t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j]) + break + } + } + } +} + +func TestCipherDecrypt(t *testing.T) { + for i, tt := range encryptTests { + c, err := NewCipher(tt.key) + if err != nil { + t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err) + continue + } + pt := make([]byte, len(tt.in)) + c.Decrypt(pt, tt.out) + for j, v := range pt { + if v != tt.in[j] { + t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j]) + break + } + } + } +} + +func TestSaltedCipherKeyLength(t *testing.T) { + if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) { + t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0)) + } + + // A 57-byte key. One over the typical blowfish restriction. + key := []byte("012345678901234567890123456789012345678901234567890123456") + if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil { + t.Errorf("NewSaltedCipher with long key, gave error %#v", err) + } +} + +// Test vectors generated with Blowfish from OpenSSH. 
+var saltedVectors = [][8]byte{ + {0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e}, + {0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12}, + {0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad}, + {0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8}, + {0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8}, + {0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf}, + {0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9}, + {0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38}, + {0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4}, + {0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c}, + {0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5}, + {0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b}, + {0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47}, + {0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2}, + {0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19}, + {0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc}, + {0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93}, + {0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57}, + {0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08}, + {0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03}, + {0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f}, + {0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef}, + {0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71}, + {0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad}, + {0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe}, + {0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13}, + {0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe}, + {0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6}, + {0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6}, + {0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92}, + {0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56}, + {0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee}, +} + +func TestSaltedCipher(t *testing.T) { + var key, salt [32]byte + for i := range key { + key[i] = byte(i) + salt[i] = byte(i + 32) + } + for i, v := range saltedVectors { + c, err := NewSaltedCipher(key[:], salt[:i]) + if err != nil { + t.Fatal(err) + } + var buf [8]byte + c.Encrypt(buf[:], 
buf[:]) + if v != buf { + t.Errorf("%d: expected %x, got %x", i, v, buf) + } + } +} + +func BenchmarkExpandKeyWithSalt(b *testing.B) { + key := make([]byte, 32) + salt := make([]byte, 16) + c, _ := NewCipher(key) + for i := 0; i < b.N; i++ { + expandKeyWithSalt(key, salt, c) + } +} + +func BenchmarkExpandKey(b *testing.B) { + key := make([]byte, 32) + c, _ := NewCipher(key) + for i := 0; i < b.N; i++ { + ExpandKey(key, c) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 000000000..5019658a4 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,91 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +package blowfish + +// The code is a port of Bruce Schneier's C implementation. +// See http://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. 
For bcrypt compatiblity, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. 
+func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 000000000..8c5ee4cb0 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// http://www.schneier.com/code/constants.txt. 
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 
0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 
0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 
0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 
+ 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml new file mode 100644 index 000000000..ccca6bb4a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip + +install: + - go get launchpad.net/gocheck + - go get gopkg.in/bufio.v1 diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE new file mode 100644 index 000000000..07a316cbf --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The bufio Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile b/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile new file mode 100644 index 000000000..038ed47e9 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile @@ -0,0 +1,2 @@ +all: + go test gopkg.in/bufio.v1 diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go new file mode 100644 index 000000000..8b915605b --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go @@ -0,0 +1,413 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio + +// Simple byte buffer for marshaling data. + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. + lastRead readOp // last read operation, so that Unread* can work correctly. +} + +// The readOp constants describe the last action performed on +// the buffer, so that UnreadRune and UnreadByte can +// check for invalid usage. +type readOp int + +const ( + opInvalid readOp = iota // Non-read operation. + opReadRune // Read rune. + opRead // Any other read operation. +) + +// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. +var ErrTooLarge = errors.New("bytes.Buffer: too large") + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). 
If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + b.lastRead = opInvalid + switch { + case n < 0 || n > b.Len(): + panic("bytes.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. + b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. 
+ copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("bytes.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lastRead = opInvalid + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// WriteString appends the contents of s to the buffer, growing the buffer as +// needed. The return value n is the length of s; err is always nil. If the +// buffer becomes too large, WriteString will panic with ErrTooLarge. +func (b *Buffer) WriteString(s string) (n int, err error) { + b.lastRead = opInvalid + m := b.grow(len(s)) + return copy(b.buf[m:], s), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with ErrTooLarge. 
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + b.lastRead = opInvalid + // If buffer is empty, reset to recover space. + if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + b.lastRead = opInvalid + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("bytes.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. 
If the buffer becomes too large, WriteByte will panic with +// ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + b.lastRead = opInvalid + m := b.grow(1) + b.buf[m] = c + return nil +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to the +// buffer, returning its length and an error, which is always nil but is +// included to match bufio.Writer's WriteRune. The buffer is grown as needed; +// if it becomes too large, WriteRune will panic with ErrTooLarge. +func (b *Buffer) WriteRune(r rune) (n int, err error) { + if r < utf8.RuneSelf { + b.WriteByte(byte(r)) + return 1, nil + } + n = utf8.EncodeRune(b.runeBytes[0:], r) + b.Write(b.runeBytes[0:n]) + return n, nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + if n > 0 { + b.lastRead = opRead + } + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + b.lastRead = opInvalid + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + if n > 0 { + b.lastRead = opRead + } + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. 
+func (b *Buffer) ReadByte() (c byte, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + b.lastRead = opRead + return c, nil +} + +// ReadRune reads and returns the next UTF-8-encoded +// Unicode code point from the buffer. +// If no bytes are available, the error returned is io.EOF. +// If the bytes are an erroneous UTF-8 encoding, it +// consumes one byte and returns U+FFFD, 1. +func (b *Buffer) ReadRune() (r rune, size int, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, 0, io.EOF + } + b.lastRead = opReadRune + c := b.buf[b.off] + if c < utf8.RuneSelf { + b.off++ + return rune(c), 1, nil + } + r, n := utf8.DecodeRune(b.buf[b.off:]) + b.off += n + return r, n, nil +} + +// UnreadRune unreads the last rune returned by ReadRune. +// If the most recent read or write operation on the buffer was +// not a ReadRune, UnreadRune returns an error. (In this regard +// it is stricter than UnreadByte, which will unread the last byte +// from any read operation.) +func (b *Buffer) UnreadRune() error { + if b.lastRead != opReadRune { + return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune") + } + b.lastRead = opInvalid + if b.off > 0 { + _, n := utf8.DecodeLastRune(b.buf[0:b.off]) + b.off -= n + } + return nil +} + +// UnreadByte unreads the last byte returned by the most recent +// read operation. If write has happened since the last read, UnreadByte +// returns an error. 
+func (b *Buffer) UnreadByte() error { + if b.lastRead != opReadRune && b.lastRead != opRead { + return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read") + } + b.lastRead = opInvalid + if b.off > 0 { + b.off-- + } + return nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + b.lastRead = opRead + return line, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadString returns err != nil if and only if the returned data does not end +// in delim. +func (b *Buffer) ReadString(delim byte) (line string, err error) { + slice, err := b.readSlice(delim) + return string(slice), err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. 
To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } + +// NewBufferString creates and initializes a new Buffer using string s as its +// initial contents. It is intended to prepare a buffer to read an existing +// string. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBufferString(s string) *Buffer { + return &Buffer{buf: []byte(s)} +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go new file mode 100644 index 000000000..ca1ac2105 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go @@ -0,0 +1,527 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio + +import ( + "bytes" + "io" + "math/rand" + "runtime" + "testing" + "unicode/utf8" +) + +const N = 10000 // make this bigger for a larger (and slower) test +var data string // test data for write tests +var testBytes []byte // test data; same as data but as a slice. + +func init() { + testBytes = make([]byte, N) + for i := 0; i < N; i++ { + testBytes[i] = 'a' + byte(i%26) + } + data = string(testBytes) +} + +// Verify that contents of buf match the string s. 
+func check(t *testing.T, testname string, buf *Buffer, s string) { + bytes := buf.Bytes() + str := buf.String() + if buf.Len() != len(bytes) { + t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) + } + + if buf.Len() != len(str) { + t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) + } + + if buf.Len() != len(s) { + t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) + } + + if string(bytes) != s { + t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) + } +} + +// Fill buf through n writes of string fus. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. +func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.WriteString(fus) + if m != len(fus) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += fus + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +// Fill buf through n writes of byte slice fub. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. 
+func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.Write(fub) + if m != len(fub) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += string(fub) + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +func TestNewBuffer(t *testing.T) { + buf := NewBuffer(testBytes) + check(t, "NewBuffer", buf, data) +} + +func TestNewBufferString(t *testing.T) { + buf := NewBufferString(data) + check(t, "NewBufferString", buf, data) +} + +// Empty buf through repeated reads into fub. +// The initial contents of buf corresponds to the string s. +func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { + check(t, testname+" (empty 1)", buf, s) + + for { + n, err := buf.Read(fub) + if n == 0 { + break + } + if err != nil { + t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) + } + s = s[n:] + check(t, testname+" (empty 3)", buf, s) + } + + check(t, testname+" (empty 4)", buf, "") +} + +func TestBasicOperations(t *testing.T) { + var buf Buffer + + for i := 0; i < 5; i++ { + check(t, "TestBasicOperations (1)", &buf, "") + + buf.Reset() + check(t, "TestBasicOperations (2)", &buf, "") + + buf.Truncate(0) + check(t, "TestBasicOperations (3)", &buf, "") + + n, err := buf.Write([]byte(data[0:1])) + if n != 1 { + t.Errorf("wrote 1 byte, but n == %d", n) + } + if err != nil { + t.Errorf("err should always be nil, but err == %s", err) + } + check(t, "TestBasicOperations (4)", &buf, "a") + + buf.WriteByte(data[1]) + check(t, "TestBasicOperations (5)", &buf, "ab") + + n, err = buf.Write([]byte(data[2:26])) + if n != 24 { + t.Errorf("wrote 25 bytes, but n == %d", n) + } + check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) + + buf.Truncate(26) + check(t, "TestBasicOperations 
(7)", &buf, string(data[0:26])) + + buf.Truncate(20) + check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) + + empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) + empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) + + buf.WriteByte(data[1]) + c, err := buf.ReadByte() + if err != nil { + t.Error("ReadByte unexpected eof") + } + if c != data[1] { + t.Errorf("ReadByte wrong value c=%v", c) + } + c, err = buf.ReadByte() + if err == nil { + t.Error("ReadByte unexpected not eof") + } + } +} + +func TestLargeStringWrites(t *testing.T) { + var buf Buffer + limit := 30 + if testing.Short() { + limit = 9 + } + for i := 3; i < limit; i += 3 { + s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data) + empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i)) + } + check(t, "TestLargeStringWrites (3)", &buf, "") +} + +func TestLargeByteWrites(t *testing.T) { + var buf Buffer + limit := 30 + if testing.Short() { + limit = 9 + } + for i := 3; i < limit; i += 3 { + s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) + empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) + } + check(t, "TestLargeByteWrites (3)", &buf, "") +} + +func TestLargeStringReads(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + } + check(t, "TestLargeStringReads (3)", &buf, "") +} + +func TestLargeByteReads(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + } + check(t, "TestLargeByteReads (3)", &buf, "") +} + +func TestMixedReadsAndWrites(t *testing.T) { + var buf Buffer + s := "" + for i := 0; i < 50; i++ { + wlen := rand.Intn(len(data)) + if i%2 == 0 { + s = fillString(t, 
"TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen]) + } else { + s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) + } + + rlen := rand.Intn(len(data)) + fub := make([]byte, rlen) + n, _ := buf.Read(fub) + s = s[n:] + } + empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) +} + +func TestNil(t *testing.T) { + var b *Buffer + if b.String() != "" { + t.Errorf("expected ; got %q", b.String()) + } +} + +func TestReadFrom(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + b.ReadFrom(&buf) + empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) + } +} + +func TestWriteTo(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + buf.WriteTo(&b) + empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) + } +} + +func TestRuneIO(t *testing.T) { + const NRune = 1000 + // Built a test slice while we write the data + b := make([]byte, utf8.UTFMax*NRune) + var buf Buffer + n := 0 + for r := rune(0); r < NRune; r++ { + size := utf8.EncodeRune(b[n:], r) + nbytes, err := buf.WriteRune(r) + if err != nil { + t.Fatalf("WriteRune(%U) error: %s", r, err) + } + if nbytes != size { + t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes) + } + n += size + } + b = b[0:n] + + // Check the resulting bytes + if !bytes.Equal(buf.Bytes(), b) { + t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b) + } + + p := make([]byte, utf8.UTFMax) + // Read it back with ReadRune + for r := rune(0); r < NRune; r++ { + size := utf8.EncodeRune(p, r) + nr, nbytes, err := buf.ReadRune() + if nr != r || nbytes != size || err != nil { + t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err) + } + } + + // Check that UnreadRune works + buf.Reset() + buf.Write(b) + for r := 
rune(0); r < NRune; r++ { + r1, size, _ := buf.ReadRune() + if err := buf.UnreadRune(); err != nil { + t.Fatalf("UnreadRune(%U) got error %q", r, err) + } + r2, nbytes, err := buf.ReadRune() + if r1 != r2 || r1 != r || nbytes != size || err != nil { + t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err) + } + } +} + +func TestNext(t *testing.T) { + b := []byte{0, 1, 2, 3, 4} + tmp := make([]byte, 5) + for i := 0; i <= 5; i++ { + for j := i; j <= 5; j++ { + for k := 0; k <= 6; k++ { + // 0 <= i <= j <= 5; 0 <= k <= 6 + // Check that if we start with a buffer + // of length j at offset i and ask for + // Next(k), we get the right bytes. + buf := NewBuffer(b[0:j]) + n, _ := buf.Read(tmp[0:i]) + if n != i { + t.Fatalf("Read %d returned %d", i, n) + } + bb := buf.Next(k) + want := k + if want > j-i { + want = j - i + } + if len(bb) != want { + t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) + } + for l, v := range bb { + if v != byte(l+i) { + t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) + } + } + } + } + } +} + +var readBytesTests = []struct { + buffer string + delim byte + expected []string + err error +}{ + {"", 0, []string{""}, io.EOF}, + {"a\x00", 0, []string{"a\x00"}, nil}, + {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, + {"hello\x01world", 1, []string{"hello\x01"}, nil}, + {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, + {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, + {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, +} + +func TestReadBytes(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBufferString(test.buffer) + var err error + for _, expected := range test.expected { + var bytes []byte + bytes, err = buf.ReadBytes(test.delim) + if string(bytes) != expected { + t.Errorf("expected %q, got %q", expected, bytes) + } + if err != nil { + break + } + } + if err != test.err { + 
t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func TestReadString(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBufferString(test.buffer) + var err error + for _, expected := range test.expected { + var s string + s, err = buf.ReadString(test.delim) + if s != expected { + t.Errorf("expected %q, got %q", expected, s) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func BenchmarkReadString(b *testing.B) { + const n = 32 << 10 + + data := make([]byte, n) + data[n-1] = 'x' + b.SetBytes(int64(n)) + for i := 0; i < b.N; i++ { + buf := NewBuffer(data) + _, err := buf.ReadString('x') + if err != nil { + b.Fatal(err) + } + } +} + +func TestGrow(t *testing.T) { + x := []byte{'x'} + y := []byte{'y'} + tmp := make([]byte, 72) + for _, startLen := range []int{0, 100, 1000, 10000, 100000} { + xBytes := bytes.Repeat(x, startLen) + for _, growLen := range []int{0, 100, 1000, 10000, 100000} { + buf := NewBuffer(xBytes) + // If we read, this affects buf.off, which is good to test. + readBytes, _ := buf.Read(tmp) + buf.Grow(growLen) + yBytes := bytes.Repeat(y, growLen) + // Check no allocation occurs in write, as long as we're single-threaded. + var m1, m2 runtime.MemStats + runtime.ReadMemStats(&m1) + buf.Write(yBytes) + runtime.ReadMemStats(&m2) + if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { + t.Errorf("allocation occurred during write") + } + // Check that buffer has correct data. + if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { + t.Errorf("bad initial data at %d %d", startLen, growLen) + } + if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { + t.Errorf("bad written data at %d %d", startLen, growLen) + } + } + } +} + +// Was a bug: used to give EOF reading empty slice at EOF. 
+func TestReadEmptyAtEOF(t *testing.T) { + b := new(Buffer) + slice := make([]byte, 0) + n, err := b.Read(slice) + if err != nil { + t.Errorf("read error: %v", err) + } + if n != 0 { + t.Errorf("wrong count; got %d want 0", n) + } +} + +func TestBufferUnreadByte(t *testing.T) { + b := new(Buffer) + b.WriteString("abcdefghijklmnopqrstuvwxyz") + + _, err := b.ReadBytes('m') + if err != nil { + t.Fatalf("ReadBytes: %v", err) + } + + err = b.UnreadByte() + if err != nil { + t.Fatalf("UnreadByte: %v", err) + } + c, err := b.ReadByte() + if err != nil { + t.Fatalf("ReadByte: %v", err) + } + if c != 'm' { + t.Errorf("ReadByte = %q; want %q", c, 'm') + } +} + +// Tests that we occasionally compact. Issue 5154. +func TestBufferGrowth(t *testing.T) { + var b Buffer + buf := make([]byte, 1024) + b.Write(buf[0:1]) + var cap0 int + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + if i == 0 { + cap0 = b.Cap() + } + } + cap1 := b.Cap() + // (*Buffer).grow allows for 2x capacity slop before sliding, + // so set our error threshold at 3x. + if cap1 > cap0*3 { + t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) + } +} + +// From Issue 5154. +func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf[0:1]) + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + } + } +} + +// Check that we don't compact too often. From Issue 5154. +func BenchmarkBufferFullSmallReads(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf) + for b.Len()+20 < b.Cap() { + b.Write(buf[:10]) + } + for i := 0; i < 5<<10; i++ { + b.Read(buf[:1]) + b.Write(buf[:1]) + } + } +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go new file mode 100644 index 000000000..8f5cdc084 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go @@ -0,0 +1,728 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer +// object, creating another object (Reader or Writer) that also implements +// the interface but provides buffering and some help for textual I/O. +package bufio + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +const ( + defaultBufSize = 4096 +) + +var ( + ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte") + ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune") + ErrBufferFull = errors.New("bufio: buffer full") + ErrNegativeCount = errors.New("bufio: negative count") +) + +// Buffered input. + +// Reader implements buffering for an io.Reader object. +type Reader struct { + buf []byte + rd io.Reader + r, w int + err error + lastByte int + lastRuneSize int +} + +const minReadBufferSize = 16 +const maxConsecutiveEmptyReads = 100 + +// NewReaderSize returns a new Reader whose buffer has at least the specified +// size. If the argument io.Reader is already a Reader with large enough +// size, it returns the underlying Reader. +func NewReaderSize(rd io.Reader, size int) *Reader { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok && len(b.buf) >= size { + return b + } + if size < minReadBufferSize { + size = minReadBufferSize + } + r := new(Reader) + r.reset(make([]byte, size), rd) + return r +} + +// NewReader returns a new Reader whose buffer has the default size. +func NewReader(rd io.Reader) *Reader { + return NewReaderSize(rd, defaultBufSize) +} + +// Reset discards any buffered data, resets all state, and switches +// the buffered reader to read from r. 
+func (b *Reader) Reset(r io.Reader) { + b.reset(b.buf, r) +} + +func (b *Reader) reset(buf []byte, r io.Reader) { + *b = Reader{ + buf: buf, + rd: r, + lastByte: -1, + lastRuneSize: -1, + } +} + +var errNegativeRead = errors.New("bufio: reader returned negative count from Read") + +// fill reads a new chunk into the buffer. +func (b *Reader) fill() { + // Slide existing data to beginning. + if b.r > 0 { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + if b.w >= len(b.buf) { + panic("bufio: tried to fill full buffer") + } + + // Read new data: try a limited number of times. + for i := maxConsecutiveEmptyReads; i > 0; i-- { + n, err := b.rd.Read(b.buf[b.w:]) + if n < 0 { + panic(errNegativeRead) + } + b.w += n + if err != nil { + b.err = err + return + } + if n > 0 { + return + } + } + b.err = io.ErrNoProgress +} + +func (b *Reader) readErr() error { + err := b.err + b.err = nil + return err +} + +// Peek returns the next n bytes without advancing the reader. The bytes stop +// being valid at the next read call. If Peek returns fewer than n bytes, it +// also returns an error explaining why the read is short. The error is +// ErrBufferFull if n is larger than b's buffer size. +func (b *Reader) Peek(n int) ([]byte, error) { + if n < 0 { + return nil, ErrNegativeCount + } + if n > len(b.buf) { + return nil, ErrBufferFull + } + // 0 <= n <= len(b.buf) + for b.w-b.r < n && b.err == nil { + b.fill() // b.w-b.r < len(b.buf) => buffer is not full + } + m := b.w - b.r + if m > n { + m = n + } + var err error + if m < n { + err = b.readErr() + if err == nil { + err = ErrBufferFull + } + } + return b.buf[b.r : b.r+m], err +} + +// Read reads data into p. +// It returns the number of bytes read into p. +// It calls Read at most once on the underlying Reader, +// hence n may be less than len(p). +// At EOF, the count will be zero and err will be io.EOF. 
+func (b *Reader) Read(p []byte) (n int, err error) { + n = len(p) + if n == 0 { + return 0, b.readErr() + } + if b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + if len(p) >= len(b.buf) { + // Large read, empty buffer. + // Read directly into p to avoid copy. + n, b.err = b.rd.Read(p) + if n < 0 { + panic(errNegativeRead) + } + if n > 0 { + b.lastByte = int(p[n-1]) + b.lastRuneSize = -1 + } + return n, b.readErr() + } + b.fill() // buffer is empty + if b.w == b.r { + return 0, b.readErr() + } + } + + if n > b.w-b.r { + n = b.w - b.r + } + copy(p[0:n], b.buf[b.r:]) + b.r += n + b.lastByte = int(b.buf[b.r-1]) + b.lastRuneSize = -1 + return n, nil +} + +// ReadByte reads and returns a single byte. +// If no byte is available, returns an error. +func (b *Reader) ReadByte() (c byte, err error) { + b.lastRuneSize = -1 + for b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + b.fill() // buffer is empty + } + c = b.buf[b.r] + b.r++ + b.lastByte = int(c) + return c, nil +} + +// UnreadByte unreads the last byte. Only the most recently read byte can be unread. +func (b *Reader) UnreadByte() error { + if b.lastByte < 0 || b.r == 0 && b.w > 0 { + return ErrInvalidUnreadByte + } + // b.r > 0 || b.w == 0 + if b.r > 0 { + b.r-- + } else { + // b.r == 0 && b.w == 0 + b.w = 1 + } + b.buf[b.r] = byte(b.lastByte) + b.lastByte = -1 + b.lastRuneSize = -1 + return nil +} + +// ReadRune reads a single UTF-8 encoded Unicode character and returns the +// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte +// and returns unicode.ReplacementChar (U+FFFD) with a size of 1. 
+func (b *Reader) ReadRune() (r rune, size int, err error) { + for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) { + b.fill() // b.w-b.r < len(buf) => buffer is not full + } + b.lastRuneSize = -1 + if b.r == b.w { + return 0, 0, b.readErr() + } + r, size = rune(b.buf[b.r]), 1 + if r >= 0x80 { + r, size = utf8.DecodeRune(b.buf[b.r:b.w]) + } + b.r += size + b.lastByte = int(b.buf[b.r-1]) + b.lastRuneSize = size + return r, size, nil +} + +// UnreadRune unreads the last rune. If the most recent read operation on +// the buffer was not a ReadRune, UnreadRune returns an error. (In this +// regard it is stricter than UnreadByte, which will unread the last byte +// from any read operation.) +func (b *Reader) UnreadRune() error { + if b.lastRuneSize < 0 || b.r < b.lastRuneSize { + return ErrInvalidUnreadRune + } + b.r -= b.lastRuneSize + b.lastByte = -1 + b.lastRuneSize = -1 + return nil +} + +// Buffered returns the number of bytes that can be read from the current buffer. +func (b *Reader) Buffered() int { return b.w - b.r } + +// ReadSlice reads until the first occurrence of delim in the input, +// returning a slice pointing at the bytes in the buffer. +// The bytes stop being valid at the next read. +// If ReadSlice encounters an error before finding a delimiter, +// it returns all the data in the buffer and the error itself (often io.EOF). +// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim. +// Because the data returned from ReadSlice will be overwritten +// by the next I/O operation, most clients should use +// ReadBytes or ReadString instead. +// ReadSlice returns err != nil if and only if line does not end in delim. +func (b *Reader) ReadSlice(delim byte) (line []byte, err error) { + for { + // Search buffer. + if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 { + line = b.buf[b.r : b.r+i+1] + b.r += i + 1 + break + } + + // Pending error? 
+ if b.err != nil { + line = b.buf[b.r:b.w] + b.r = b.w + err = b.readErr() + break + } + + // Buffer full? + if n := b.Buffered(); n >= len(b.buf) { + b.r = b.w + line = b.buf + err = ErrBufferFull + break + } + + b.fill() // buffer is not full + } + + // Handle last byte, if any. + if i := len(line) - 1; i >= 0 { + b.lastByte = int(line[i]) + } + + return +} + +// ReadN tries to read exactly n bytes. +// The bytes stop being valid at the next read call. +// If ReadN encounters an error before reading n bytes, +// it returns all the data in the buffer and the error itself (often io.EOF). +// ReadN fails with error ErrBufferFull if the buffer fills +// without reading N bytes. +// Because the data returned from ReadN will be overwritten +// by the next I/O operation, most clients should use +// ReadBytes or ReadString instead. +func (b *Reader) ReadN(n int) ([]byte, error) { + for b.Buffered() < n { + if b.err != nil { + buf := b.buf[b.r:b.w] + b.r = b.w + return buf, b.readErr() + } + + // Buffer is full? + if b.Buffered() >= len(b.buf) { + b.r = b.w + return b.buf, ErrBufferFull + } + + b.fill() + } + buf := b.buf[b.r : b.r+n] + b.r += n + return buf, nil +} + +// ReadLine is a low-level line-reading primitive. Most callers should use +// ReadBytes('\n') or ReadString('\n') instead or use a Scanner. +// +// ReadLine tries to return a single line, not including the end-of-line bytes. +// If the line was too long for the buffer then isPrefix is set and the +// beginning of the line is returned. The rest of the line will be returned +// from future calls. isPrefix will be false when returning the last fragment +// of the line. The returned buffer is only valid until the next call to +// ReadLine. ReadLine either returns a non-nil line or it returns an error, +// never both. +// +// The text returned from ReadLine does not include the line end ("\r\n" or "\n"). +// No indication or error is given if the input ends without a final line end. 
+// Calling UnreadByte after ReadLine will always unread the last byte read +// (possibly a character belonging to the line end) even if that byte is not +// part of the line returned by ReadLine. +func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) { + line, err = b.ReadSlice('\n') + if err == ErrBufferFull { + // Handle the case where "\r\n" straddles the buffer. + if len(line) > 0 && line[len(line)-1] == '\r' { + // Put the '\r' back on buf and drop it from line. + // Let the next call to ReadLine check for "\r\n". + if b.r == 0 { + // should be unreachable + panic("bufio: tried to rewind past start of buffer") + } + b.r-- + line = line[:len(line)-1] + } + return line, true, nil + } + + if len(line) == 0 { + if err != nil { + line = nil + } + return + } + err = nil + + if line[len(line)-1] == '\n' { + drop := 1 + if len(line) > 1 && line[len(line)-2] == '\r' { + drop = 2 + } + line = line[:len(line)-drop] + } + return +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +// For simple uses, a Scanner may be more convenient. +func (b *Reader) ReadBytes(delim byte) (line []byte, err error) { + // Use ReadSlice to look for array, + // accumulating full buffers. + var frag []byte + var full [][]byte + err = nil + + for { + var e error + frag, e = b.ReadSlice(delim) + if e == nil { // got final fragment + break + } + if e != ErrBufferFull { // unexpected error + err = e + break + } + + // Make a copy of the buffer. + buf := make([]byte, len(frag)) + copy(buf, frag) + full = append(full, buf) + } + + // Allocate new buffer to hold the full pieces and the fragment. 
+	n := 0
+	for i := range full {
+		n += len(full[i])
+	}
+	n += len(frag)
+
+	// Copy full pieces and fragment in.
+	buf := make([]byte, n)
+	n = 0
+	for i := range full {
+		n += copy(buf[n:], full[i])
+	}
+	copy(buf[n:], frag)
+	return buf, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadString(delim byte) (line string, err error) {
+	bytes, err := b.ReadBytes(delim)
+	line = string(bytes)
+	return line, err
+}
+
+// WriteTo implements io.WriterTo.
+// It first drains any buffered data, then delegates to the underlying
+// reader's WriteTo or the destination's ReadFrom when available to avoid
+// an extra copy through b.buf.
+func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	n, err = b.writeBuf(w)
+	if err != nil {
+		return
+	}
+
+	if r, ok := b.rd.(io.WriterTo); ok {
+		m, err := r.WriteTo(w)
+		n += m
+		return n, err
+	}
+
+	if w, ok := w.(io.ReaderFrom); ok {
+		m, err := w.ReadFrom(b.rd)
+		n += m
+		return n, err
+	}
+
+	if b.w-b.r < len(b.buf) {
+		b.fill() // buffer not full
+	}
+
+	for b.r < b.w {
+		// b.r < b.w => buffer is not empty
+		m, err := b.writeBuf(w)
+		n += m
+		if err != nil {
+			return n, err
+		}
+		b.fill() // buffer is empty
+	}
+
+	if b.err == io.EOF {
+		b.err = nil
+	}
+
+	return n, b.readErr()
+}
+
+// writeBuf writes the Reader's buffer to the writer.
+func (b *Reader) writeBuf(w io.Writer) (int64, error) {
+	n, err := w.Write(b.buf[b.r:b.w])
+	// Per the io.Writer contract, a short write (n < len(p)) must be
+	// accompanied by a non-nil error; a nil-error short write here would
+	// silently lose buffered data. The previous check compared against
+	// b.r-b.w (never positive), so it could never fire.
+	if err == nil && n < b.w-b.r {
+		panic(errors.New("bufio: writer did not write all data"))
+	}
+	b.r += n
+	return int64(n), err
+}
+
+// buffered output
+
+// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes will return the error.
+// After all data has been written, the client should call the +// Flush method to guarantee all data has been forwarded to +// the underlying io.Writer. +type Writer struct { + err error + buf []byte + n int + wr io.Writer +} + +// NewWriterSize returns a new Writer whose buffer has at least the specified +// size. If the argument io.Writer is already a Writer with large enough +// size, it returns the underlying Writer. +func NewWriterSize(w io.Writer, size int) *Writer { + // Is it already a Writer? + b, ok := w.(*Writer) + if ok && len(b.buf) >= size { + return b + } + if size <= 0 { + size = defaultBufSize + } + return &Writer{ + buf: make([]byte, size), + wr: w, + } +} + +// NewWriter returns a new Writer whose buffer has the default size. +func NewWriter(w io.Writer) *Writer { + return NewWriterSize(w, defaultBufSize) +} + +// Reset discards any unflushed buffered data, clears any error, and +// resets b to write its output to w. +func (b *Writer) Reset(w io.Writer) { + b.err = nil + b.n = 0 + b.wr = w +} + +// Flush writes any buffered data to the underlying io.Writer. +func (b *Writer) Flush() error { + err := b.flush() + return err +} + +func (b *Writer) flush() error { + if b.err != nil { + return b.err + } + if b.n == 0 { + return nil + } + n, err := b.wr.Write(b.buf[0:b.n]) + if n < b.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < b.n { + copy(b.buf[0:b.n-n], b.buf[n:b.n]) + } + b.n -= n + b.err = err + return err + } + b.n = 0 + return nil +} + +// Available returns how many bytes are unused in the buffer. +func (b *Writer) Available() int { return len(b.buf) - b.n } + +// Buffered returns the number of bytes that have been written into the current buffer. +func (b *Writer) Buffered() int { return b.n } + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. 
+func (b *Writer) Write(p []byte) (nn int, err error) { + for len(p) > b.Available() && b.err == nil { + var n int + if b.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, b.err = b.wr.Write(p) + } else { + n = copy(b.buf[b.n:], p) + b.n += n + b.flush() + } + nn += n + p = p[n:] + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], p) + b.n += n + nn += n + return nn, nil +} + +// WriteByte writes a single byte. +func (b *Writer) WriteByte(c byte) error { + if b.err != nil { + return b.err + } + if b.Available() <= 0 && b.flush() != nil { + return b.err + } + b.buf[b.n] = c + b.n++ + return nil +} + +// WriteRune writes a single Unicode code point, returning +// the number of bytes written and any error. +func (b *Writer) WriteRune(r rune) (size int, err error) { + if r < utf8.RuneSelf { + err = b.WriteByte(byte(r)) + if err != nil { + return 0, err + } + return 1, nil + } + if b.err != nil { + return 0, b.err + } + n := b.Available() + if n < utf8.UTFMax { + if b.flush(); b.err != nil { + return 0, b.err + } + n = b.Available() + if n < utf8.UTFMax { + // Can only happen if buffer is silly small. + return b.WriteString(string(r)) + } + } + size = utf8.EncodeRune(b.buf[b.n:], r) + b.n += size + return size, nil +} + +// WriteString writes a string. +// It returns the number of bytes written. +// If the count is less than len(s), it also returns an error explaining +// why the write is short. +func (b *Writer) WriteString(s string) (int, error) { + nn := 0 + for len(s) > b.Available() && b.err == nil { + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + s = s[n:] + b.flush() + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + return nn, nil +} + +// ReadFrom implements io.ReaderFrom. 
+func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) { + if b.Buffered() == 0 { + if w, ok := b.wr.(io.ReaderFrom); ok { + return w.ReadFrom(r) + } + } + var m int + for { + if b.Available() == 0 { + if err1 := b.flush(); err1 != nil { + return n, err1 + } + } + nr := 0 + for nr < maxConsecutiveEmptyReads { + m, err = r.Read(b.buf[b.n:]) + if m != 0 || err != nil { + break + } + nr++ + } + if nr == maxConsecutiveEmptyReads { + return n, io.ErrNoProgress + } + b.n += m + n += int64(m) + if err != nil { + break + } + } + if err == io.EOF { + // If we filled the buffer exactly, flush pre-emptively. + if b.Available() == 0 { + err = b.flush() + } else { + err = nil + } + } + return n, err +} + +// buffered input and output + +// ReadWriter stores pointers to a Reader and a Writer. +// It implements io.ReadWriter. +type ReadWriter struct { + *Reader + *Writer +} + +// NewReadWriter allocates a new ReadWriter that dispatches to r and w. +func NewReadWriter(r *Reader, w *Writer) *ReadWriter { + return &ReadWriter{r, w} +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go new file mode 100644 index 000000000..f19d9bd28 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go @@ -0,0 +1,1418 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "testing/iotest" + "time" + "unicode/utf8" + + . "gopkg.in/bufio.v1" +) + +// Reads from a reader and rot13s the result. 
+type rot13Reader struct { + r io.Reader +} + +func newRot13Reader(r io.Reader) *rot13Reader { + r13 := new(rot13Reader) + r13.r = r + return r13 +} + +func (r13 *rot13Reader) Read(p []byte) (int, error) { + n, err := r13.r.Read(p) + if err != nil { + return n, err + } + for i := 0; i < n; i++ { + c := p[i] | 0x20 // lowercase byte + if 'a' <= c && c <= 'm' { + p[i] += 13 + } else if 'n' <= c && c <= 'z' { + p[i] -= 13 + } + } + return n, nil +} + +// Call ReadByte to accumulate the text of a file +func readBytes(buf *Reader) string { + var b [1000]byte + nb := 0 + for { + c, err := buf.ReadByte() + if err == io.EOF { + break + } + if err == nil { + b[nb] = c + nb++ + } else if err != iotest.ErrTimeout { + panic("Data: " + err.Error()) + } + } + return string(b[0:nb]) +} + +func TestReaderSimple(t *testing.T) { + data := "hello world" + b := NewReader(strings.NewReader(data)) + if s := readBytes(b); s != "hello world" { + t.Errorf("simple hello world test failed: got %q", s) + } + + b = NewReader(newRot13Reader(strings.NewReader(data))) + if s := readBytes(b); s != "uryyb jbeyq" { + t.Errorf("rot13 hello world test failed: got %q", s) + } +} + +type readMaker struct { + name string + fn func(io.Reader) io.Reader +} + +var readMakers = []readMaker{ + {"full", func(r io.Reader) io.Reader { return r }}, + {"byte", iotest.OneByteReader}, + {"half", iotest.HalfReader}, + {"data+err", iotest.DataErrReader}, + {"timeout", iotest.TimeoutReader}, +} + +// Call ReadString (which ends up calling everything else) +// to accumulate the text of a file. 
+func readLines(b *Reader) string { + s := "" + for { + s1, err := b.ReadString('\n') + if err == io.EOF { + break + } + if err != nil && err != iotest.ErrTimeout { + panic("GetLines: " + err.Error()) + } + s += s1 + } + return s +} + +// Call Read to accumulate the text of a file +func reads(buf *Reader, m int) string { + var b [1000]byte + nb := 0 + for { + n, err := buf.Read(b[nb : nb+m]) + nb += n + if err == io.EOF { + break + } + } + return string(b[0:nb]) +} + +type bufReader struct { + name string + fn func(*Reader) string +} + +var bufreaders = []bufReader{ + {"1", func(b *Reader) string { return reads(b, 1) }}, + {"2", func(b *Reader) string { return reads(b, 2) }}, + {"3", func(b *Reader) string { return reads(b, 3) }}, + {"4", func(b *Reader) string { return reads(b, 4) }}, + {"5", func(b *Reader) string { return reads(b, 5) }}, + {"7", func(b *Reader) string { return reads(b, 7) }}, + {"bytes", readBytes}, + {"lines", readLines}, +} + +const minReadBufferSize = 16 + +var bufsizes = []int{ + 0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096, +} + +func TestReader(t *testing.T) { + var texts [31]string + str := "" + all := "" + for i := 0; i < len(texts)-1; i++ { + texts[i] = str + "\n" + all += texts[i] + str += string(i%26 + 'a') + } + texts[len(texts)-1] = all + + for h := 0; h < len(texts); h++ { + text := texts[h] + for i := 0; i < len(readMakers); i++ { + for j := 0; j < len(bufreaders); j++ { + for k := 0; k < len(bufsizes); k++ { + readmaker := readMakers[i] + bufreader := bufreaders[j] + bufsize := bufsizes[k] + read := readmaker.fn(strings.NewReader(text)) + buf := NewReaderSize(read, bufsize) + s := bufreader.fn(buf) + if s != text { + t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q", + readmaker.name, bufreader.name, bufsize, text, s) + } + } + } + } + } +} + +type zeroReader struct{} + +func (zeroReader) Read(p []byte) (int, error) { + return 0, nil +} + +func TestZeroReader(t *testing.T) { + var z zeroReader + r := NewReader(z) 
+ + c := make(chan error) + go func() { + _, err := r.ReadByte() + c <- err + }() + + select { + case err := <-c: + if err == nil { + t.Error("error expected") + } else if err != io.ErrNoProgress { + t.Error("unexpected error:", err) + } + case <-time.After(time.Second): + t.Error("test timed out (endless loop in ReadByte?)") + } +} + +// A StringReader delivers its data one string segment at a time via Read. +type StringReader struct { + data []string + step int +} + +func (r *StringReader) Read(p []byte) (n int, err error) { + if r.step < len(r.data) { + s := r.data[r.step] + n = copy(p, s) + r.step++ + } else { + err = io.EOF + } + return +} + +func readRuneSegments(t *testing.T, segments []string) { + got := "" + want := strings.Join(segments, "") + r := NewReader(&StringReader{data: segments}) + for { + r, _, err := r.ReadRune() + if err != nil { + if err != io.EOF { + return + } + break + } + got += string(r) + } + if got != want { + t.Errorf("segments=%v got=%s want=%s", segments, got, want) + } +} + +var segmentList = [][]string{ + {}, + {""}, + {"日", "本語"}, + {"\u65e5", "\u672c", "\u8a9e"}, + {"\U000065e5", "\U0000672c", "\U00008a9e"}, + {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"}, + {"Hello", ", ", "World", "!"}, + {"Hello", ", ", "", "World", "!"}, +} + +func TestReadRune(t *testing.T) { + for _, s := range segmentList { + readRuneSegments(t, s) + } +} + +func TestUnreadRune(t *testing.T) { + segments := []string{"Hello, world:", "日本語"} + r := NewReader(&StringReader{data: segments}) + got := "" + want := strings.Join(segments, "") + // Normal execution. + for { + r1, _, err := r.ReadRune() + if err != nil { + if err != io.EOF { + t.Error("unexpected error on ReadRune:", err) + } + break + } + got += string(r1) + // Put it back and read it again. 
+ if err = r.UnreadRune(); err != nil { + t.Fatal("unexpected error on UnreadRune:", err) + } + r2, _, err := r.ReadRune() + if err != nil { + t.Fatal("unexpected error reading after unreading:", err) + } + if r1 != r2 { + t.Fatalf("incorrect rune after unread: got %c, want %c", r1, r2) + } + } + if got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestReaderUnreadByte(t *testing.T) { + segments := []string{"Hello, ", "world"} + r := NewReader(&StringReader{data: segments}) + got := "" + want := strings.Join(segments, "") + // Normal execution. + for { + b1, err := r.ReadByte() + if err != nil { + if err != io.EOF { + t.Error("unexpected error on ReadByte:", err) + } + break + } + got += string(b1) + // Put it back and read it again. + if err = r.UnreadByte(); err != nil { + t.Fatal("unexpected error on UnreadByte:", err) + } + b2, err := r.ReadByte() + if err != nil { + t.Fatal("unexpected error reading after unreading:", err) + } + if b1 != b2 { + t.Fatalf("incorrect byte after unread: got %q, want %q", b1, b2) + } + } + if got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestUnreadByteMultiple(t *testing.T) { + segments := []string{"Hello, ", "world"} + data := strings.Join(segments, "") + for n := 0; n <= len(data); n++ { + r := NewReader(&StringReader{data: segments}) + // Read n bytes. + for i := 0; i < n; i++ { + b, err := r.ReadByte() + if err != nil { + t.Fatalf("n = %d: unexpected error on ReadByte: %v", n, err) + } + if b != data[i] { + t.Fatalf("n = %d: incorrect byte returned from ReadByte: got %q, want %q", n, b, data[i]) + } + } + // Unread one byte if there is one. + if n > 0 { + if err := r.UnreadByte(); err != nil { + t.Errorf("n = %d: unexpected error on UnreadByte: %v", n, err) + } + } + // Test that we cannot unread any further. 
+ if err := r.UnreadByte(); err == nil { + t.Errorf("n = %d: expected error on UnreadByte", n) + } + } +} + +func TestUnreadByteOthers(t *testing.T) { + // A list of readers to use in conjunction with UnreadByte. + var readers = []func(*Reader, byte) ([]byte, error){ + (*Reader).ReadBytes, + (*Reader).ReadSlice, + func(r *Reader, delim byte) ([]byte, error) { + data, err := r.ReadString(delim) + return []byte(data), err + }, + // ReadLine doesn't fit the data/pattern easily + // so we leave it out. It should be covered via + // the ReadSlice test since ReadLine simply calls + // ReadSlice, and it's that function that handles + // the last byte. + } + + // Try all readers with UnreadByte. + for rno, read := range readers { + // Some input data that is longer than the minimum reader buffer size. + const n = 10 + var buf bytes.Buffer + for i := 0; i < n; i++ { + buf.WriteString("abcdefg") + } + + r := NewReaderSize(&buf, minReadBufferSize) + readTo := func(delim byte, want string) { + data, err := read(r, delim) + if err != nil { + t.Fatalf("#%d: unexpected error reading to %c: %v", rno, delim, err) + } + if got := string(data); got != want { + t.Fatalf("#%d: got %q, want %q", rno, got, want) + } + } + + // Read the data with occasional UnreadByte calls. + for i := 0; i < n; i++ { + readTo('d', "abcd") + for j := 0; j < 3; j++ { + if err := r.UnreadByte(); err != nil { + t.Fatalf("#%d: unexpected error on UnreadByte: %v", rno, err) + } + readTo('d', "d") + } + readTo('g', "efg") + } + + // All data should have been read. + _, err := r.ReadByte() + if err != io.EOF { + t.Errorf("#%d: got error %v; want EOF", rno, err) + } + } +} + +// Test that UnreadRune fails if the preceding operation was not a ReadRune. 
+func TestUnreadRuneError(t *testing.T) {
+	buf := make([]byte, 3) // All runes in this test are 3 bytes long
+	r := NewReader(&StringReader{data: []string{"日本語日本語日本語"}})
+	if r.UnreadRune() == nil {
+		t.Error("expected error on UnreadRune from fresh buffer")
+	}
+	_, _, err := r.ReadRune()
+	if err != nil {
+		t.Error("unexpected error on ReadRune (1):", err)
+	}
+	if err = r.UnreadRune(); err != nil {
+		t.Error("unexpected error on UnreadRune (1):", err)
+	}
+	if r.UnreadRune() == nil {
+		t.Error("expected error after UnreadRune (1)")
+	}
+	// Test error after Read.
+	_, _, err = r.ReadRune() // reset state
+	if err != nil {
+		t.Error("unexpected error on ReadRune (2):", err)
+	}
+	_, err = r.Read(buf)
+	if err != nil {
+		t.Error("unexpected error on Read (2):", err)
+	}
+	if r.UnreadRune() == nil {
+		t.Error("expected error after Read (2)")
+	}
+	// Test error after ReadByte.
+	_, _, err = r.ReadRune() // reset state
+	if err != nil {
+		t.Error("unexpected error on ReadRune (2):", err)
+	}
+	// Consume exactly one rune's worth of bytes one byte at a time.
+	// (`for range` — the loop variable was unused; `for _ = range` is the
+	// non-idiomatic form flagged by gofmt -s.)
+	for range buf {
+		_, err = r.ReadByte()
+		if err != nil {
+			t.Error("unexpected error on ReadByte (2):", err)
+		}
+	}
+	if r.UnreadRune() == nil {
+		t.Error("expected error after ReadByte")
+	}
+	// Test error after UnreadByte.
+ _, _, err = r.ReadRune() // reset state + if err != nil { + t.Error("unexpected error on ReadRune (3):", err) + } + _, err = r.ReadByte() + if err != nil { + t.Error("unexpected error on ReadByte (3):", err) + } + err = r.UnreadByte() + if err != nil { + t.Error("unexpected error on UnreadByte (3):", err) + } + if r.UnreadRune() == nil { + t.Error("expected error after UnreadByte (3)") + } +} + +func TestUnreadRuneAtEOF(t *testing.T) { + // UnreadRune/ReadRune should error at EOF (was a bug; used to panic) + r := NewReader(strings.NewReader("x")) + r.ReadRune() + r.ReadRune() + r.UnreadRune() + _, _, err := r.ReadRune() + if err == nil { + t.Error("expected error at EOF") + } else if err != io.EOF { + t.Error("expected EOF; got", err) + } +} + +func TestReadWriteRune(t *testing.T) { + const NRune = 1000 + byteBuf := new(bytes.Buffer) + w := NewWriter(byteBuf) + // Write the runes out using WriteRune + buf := make([]byte, utf8.UTFMax) + for r := rune(0); r < NRune; r++ { + size := utf8.EncodeRune(buf, r) + nbytes, err := w.WriteRune(r) + if err != nil { + t.Fatalf("WriteRune(0x%x) error: %s", r, err) + } + if nbytes != size { + t.Fatalf("WriteRune(0x%x) expected %d, got %d", r, size, nbytes) + } + } + w.Flush() + + r := NewReader(byteBuf) + // Read them back with ReadRune + for r1 := rune(0); r1 < NRune; r1++ { + size := utf8.EncodeRune(buf, r1) + nr, nbytes, err := r.ReadRune() + if nr != r1 || nbytes != size || err != nil { + t.Fatalf("ReadRune(0x%x) got 0x%x,%d not 0x%x,%d (err=%s)", r1, nr, nbytes, r1, size, err) + } + } +} + +func TestWriter(t *testing.T) { + var data [8192]byte + + for i := 0; i < len(data); i++ { + data[i] = byte(' ' + i%('~'-' ')) + } + w := new(bytes.Buffer) + for i := 0; i < len(bufsizes); i++ { + for j := 0; j < len(bufsizes); j++ { + nwrite := bufsizes[i] + bs := bufsizes[j] + + // Write nwrite bytes using buffer size bs. + // Check that the right amount makes it out + // and that the data is correct. 
+ + w.Reset() + buf := NewWriterSize(w, bs) + context := fmt.Sprintf("nwrite=%d bufsize=%d", nwrite, bs) + n, e1 := buf.Write(data[0:nwrite]) + if e1 != nil || n != nwrite { + t.Errorf("%s: buf.Write %d = %d, %v", context, nwrite, n, e1) + continue + } + if e := buf.Flush(); e != nil { + t.Errorf("%s: buf.Flush = %v", context, e) + } + + written := w.Bytes() + if len(written) != nwrite { + t.Errorf("%s: %d bytes written", context, len(written)) + } + for l := 0; l < len(written); l++ { + if written[i] != data[i] { + t.Errorf("wrong bytes written") + t.Errorf("want=%q", data[0:len(written)]) + t.Errorf("have=%q", written) + } + } + } + } +} + +// Check that write errors are returned properly. + +type errorWriterTest struct { + n, m int + err error + expect error +} + +func (w errorWriterTest) Write(p []byte) (int, error) { + return len(p) * w.n / w.m, w.err +} + +var errorWriterTests = []errorWriterTest{ + {0, 1, nil, io.ErrShortWrite}, + {1, 2, nil, io.ErrShortWrite}, + {1, 1, nil, nil}, + {0, 1, io.ErrClosedPipe, io.ErrClosedPipe}, + {1, 2, io.ErrClosedPipe, io.ErrClosedPipe}, + {1, 1, io.ErrClosedPipe, io.ErrClosedPipe}, +} + +func TestWriteErrors(t *testing.T) { + for _, w := range errorWriterTests { + buf := NewWriter(w) + _, e := buf.Write([]byte("hello world")) + if e != nil { + t.Errorf("Write hello to %v: %v", w, e) + continue + } + // Two flushes, to verify the error is sticky. + for i := 0; i < 2; i++ { + e = buf.Flush() + if e != w.expect { + t.Errorf("Flush %d/2 %v: got %v, wanted %v", i+1, w, e, w.expect) + } + } + } +} + +func TestNewReaderSizeIdempotent(t *testing.T) { + const BufSize = 1000 + b := NewReaderSize(strings.NewReader("hello world"), BufSize) + // Does it recognize itself? + b1 := NewReaderSize(b, BufSize) + if b1 != b { + t.Error("NewReaderSize did not detect underlying Reader") + } + // Does it wrap if existing buffer is too small? 
+ b2 := NewReaderSize(b, 2*BufSize) + if b2 == b { + t.Error("NewReaderSize did not enlarge buffer") + } +} + +func TestNewWriterSizeIdempotent(t *testing.T) { + const BufSize = 1000 + b := NewWriterSize(new(bytes.Buffer), BufSize) + // Does it recognize itself? + b1 := NewWriterSize(b, BufSize) + if b1 != b { + t.Error("NewWriterSize did not detect underlying Writer") + } + // Does it wrap if existing buffer is too small? + b2 := NewWriterSize(b, 2*BufSize) + if b2 == b { + t.Error("NewWriterSize did not enlarge buffer") + } +} + +func TestWriteString(t *testing.T) { + const BufSize = 8 + buf := new(bytes.Buffer) + b := NewWriterSize(buf, BufSize) + b.WriteString("0") // easy + b.WriteString("123456") // still easy + b.WriteString("7890") // easy after flush + b.WriteString("abcdefghijklmnopqrstuvwxy") // hard + b.WriteString("z") + if err := b.Flush(); err != nil { + t.Error("WriteString", err) + } + s := "01234567890abcdefghijklmnopqrstuvwxyz" + if string(buf.Bytes()) != s { + t.Errorf("WriteString wants %q gets %q", s, string(buf.Bytes())) + } +} + +func TestBufferFull(t *testing.T) { + const longString = "And now, hello, world! It is the time for all good men to come to the aid of their party" + buf := NewReaderSize(strings.NewReader(longString), minReadBufferSize) + line, err := buf.ReadSlice('!') + if string(line) != "And now, hello, " || err != ErrBufferFull { + t.Errorf("first ReadSlice(,) = %q, %v", line, err) + } + line, err = buf.ReadSlice('!') + if string(line) != "world!" || err != nil { + t.Errorf("second ReadSlice(,) = %q, %v", line, err) + } +} + +func TestPeek(t *testing.T) { + p := make([]byte, 10) + // string is 16 (minReadBufferSize) long. 
+ buf := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize) + if s, err := buf.Peek(1); string(s) != "a" || err != nil { + t.Fatalf("want %q got %q, err=%v", "a", string(s), err) + } + if s, err := buf.Peek(4); string(s) != "abcd" || err != nil { + t.Fatalf("want %q got %q, err=%v", "abcd", string(s), err) + } + if _, err := buf.Peek(-1); err != ErrNegativeCount { + t.Fatalf("want ErrNegativeCount got %v", err) + } + if _, err := buf.Peek(32); err != ErrBufferFull { + t.Fatalf("want ErrBufFull got %v", err) + } + if _, err := buf.Read(p[0:3]); string(p[0:3]) != "abc" || err != nil { + t.Fatalf("want %q got %q, err=%v", "abc", string(p[0:3]), err) + } + if s, err := buf.Peek(1); string(s) != "d" || err != nil { + t.Fatalf("want %q got %q, err=%v", "d", string(s), err) + } + if s, err := buf.Peek(2); string(s) != "de" || err != nil { + t.Fatalf("want %q got %q, err=%v", "de", string(s), err) + } + if _, err := buf.Read(p[0:3]); string(p[0:3]) != "def" || err != nil { + t.Fatalf("want %q got %q, err=%v", "def", string(p[0:3]), err) + } + if s, err := buf.Peek(4); string(s) != "ghij" || err != nil { + t.Fatalf("want %q got %q, err=%v", "ghij", string(s), err) + } + if _, err := buf.Read(p[0:]); string(p[0:]) != "ghijklmnop" || err != nil { + t.Fatalf("want %q got %q, err=%v", "ghijklmnop", string(p[0:minReadBufferSize]), err) + } + if s, err := buf.Peek(0); string(s) != "" || err != nil { + t.Fatalf("want %q got %q, err=%v", "", string(s), err) + } + if _, err := buf.Peek(1); err != io.EOF { + t.Fatalf("want EOF got %v", err) + } + + // Test for issue 3022, not exposing a reader's error on a successful Peek. 
+ buf = NewReaderSize(dataAndEOFReader("abcd"), 32) + if s, err := buf.Peek(2); string(s) != "ab" || err != nil { + t.Errorf(`Peek(2) on "abcd", EOF = %q, %v; want "ab", nil`, string(s), err) + } + if s, err := buf.Peek(4); string(s) != "abcd" || err != nil { + t.Errorf(`Peek(4) on "abcd", EOF = %q, %v; want "abcd", nil`, string(s), err) + } + if n, err := buf.Read(p[0:5]); string(p[0:n]) != "abcd" || err != nil { + t.Fatalf("Read after peek = %q, %v; want abcd, EOF", p[0:n], err) + } + if n, err := buf.Read(p[0:1]); string(p[0:n]) != "" || err != io.EOF { + t.Fatalf(`second Read after peek = %q, %v; want "", EOF`, p[0:n], err) + } +} + +type dataAndEOFReader string + +func (r dataAndEOFReader) Read(p []byte) (int, error) { + return copy(p, r), io.EOF +} + +func TestPeekThenUnreadRune(t *testing.T) { + // This sequence used to cause a crash. + r := NewReader(strings.NewReader("x")) + r.ReadRune() + r.Peek(1) + r.UnreadRune() + r.ReadRune() // Used to panic here +} + +var testOutput = []byte("0123456789abcdefghijklmnopqrstuvwxy") +var testInput = []byte("012\n345\n678\n9ab\ncde\nfgh\nijk\nlmn\nopq\nrst\nuvw\nxy") +var testInputrn = []byte("012\r\n345\r\n678\r\n9ab\r\ncde\r\nfgh\r\nijk\r\nlmn\r\nopq\r\nrst\r\nuvw\r\nxy\r\n\n\r\n") + +// TestReader wraps a []byte and returns reads of a specific length. 
+type testReader struct { + data []byte + stride int +} + +func (t *testReader) Read(buf []byte) (n int, err error) { + n = t.stride + if n > len(t.data) { + n = len(t.data) + } + if n > len(buf) { + n = len(buf) + } + copy(buf, t.data) + t.data = t.data[n:] + if len(t.data) == 0 { + err = io.EOF + } + return +} + +func testReadLine(t *testing.T, input []byte) { + //for stride := 1; stride < len(input); stride++ { + for stride := 1; stride < 2; stride++ { + done := 0 + reader := testReader{input, stride} + l := NewReaderSize(&reader, len(input)+1) + for { + line, isPrefix, err := l.ReadLine() + if len(line) > 0 && err != nil { + t.Errorf("ReadLine returned both data and error: %s", err) + } + if isPrefix { + t.Errorf("ReadLine returned prefix") + } + if err != nil { + if err != io.EOF { + t.Fatalf("Got unknown error: %s", err) + } + break + } + if want := testOutput[done : done+len(line)]; !bytes.Equal(want, line) { + t.Errorf("Bad line at stride %d: want: %x got: %x", stride, want, line) + } + done += len(line) + } + if done != len(testOutput) { + t.Errorf("ReadLine didn't return everything: got: %d, want: %d (stride: %d)", done, len(testOutput), stride) + } + } +} + +func TestReadLine(t *testing.T) { + testReadLine(t, testInput) + testReadLine(t, testInputrn) +} + +func TestLineTooLong(t *testing.T) { + data := make([]byte, 0) + for i := 0; i < minReadBufferSize*5/2; i++ { + data = append(data, '0'+byte(i%10)) + } + buf := bytes.NewReader(data) + l := NewReaderSize(buf, minReadBufferSize) + line, isPrefix, err := l.ReadLine() + if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil { + t.Errorf("bad result for first line: got %q want %q %v", line, data[:minReadBufferSize], err) + } + data = data[len(line):] + line, isPrefix, err = l.ReadLine() + if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil { + t.Errorf("bad result for second line: got %q want %q %v", line, data[:minReadBufferSize], err) + } + data = 
data[len(line):] + line, isPrefix, err = l.ReadLine() + if isPrefix || !bytes.Equal(line, data[:minReadBufferSize/2]) || err != nil { + t.Errorf("bad result for third line: got %q want %q %v", line, data[:minReadBufferSize/2], err) + } + line, isPrefix, err = l.ReadLine() + if isPrefix || err == nil { + t.Errorf("expected no more lines: %x %s", line, err) + } +} + +func TestReadAfterLines(t *testing.T) { + line1 := "this is line1" + restData := "this is line2\nthis is line 3\n" + inbuf := bytes.NewReader([]byte(line1 + "\n" + restData)) + outbuf := new(bytes.Buffer) + maxLineLength := len(line1) + len(restData)/2 + l := NewReaderSize(inbuf, maxLineLength) + line, isPrefix, err := l.ReadLine() + if isPrefix || err != nil || string(line) != line1 { + t.Errorf("bad result for first line: isPrefix=%v err=%v line=%q", isPrefix, err, string(line)) + } + n, err := io.Copy(outbuf, l) + if int(n) != len(restData) || err != nil { + t.Errorf("bad result for Read: n=%d err=%v", n, err) + } + if outbuf.String() != restData { + t.Errorf("bad result for Read: got %q; expected %q", outbuf.String(), restData) + } +} + +func TestReadEmptyBuffer(t *testing.T) { + l := NewReaderSize(new(bytes.Buffer), minReadBufferSize) + line, isPrefix, err := l.ReadLine() + if err != io.EOF { + t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err) + } +} + +func TestLinesAfterRead(t *testing.T) { + l := NewReaderSize(bytes.NewReader([]byte("foo")), minReadBufferSize) + _, err := ioutil.ReadAll(l) + if err != nil { + t.Error(err) + return + } + + line, isPrefix, err := l.ReadLine() + if err != io.EOF { + t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err) + } +} + +func TestReadLineNonNilLineOrError(t *testing.T) { + r := NewReader(strings.NewReader("line 1\n")) + for i := 0; i < 2; i++ { + l, _, err := r.ReadLine() + if l != nil && err != nil { + t.Fatalf("on line %d/2; ReadLine=%#v, %v; want non-nil line or Error, but not both", + i+1, l, err) + } + } 
+} + +type readLineResult struct { + line []byte + isPrefix bool + err error +} + +var readLineNewlinesTests = []struct { + input string + expect []readLineResult +}{ + {"012345678901234\r\n012345678901234\r\n", []readLineResult{ + {[]byte("012345678901234"), true, nil}, + {nil, false, nil}, + {[]byte("012345678901234"), true, nil}, + {nil, false, nil}, + {nil, false, io.EOF}, + }}, + {"0123456789012345\r012345678901234\r", []readLineResult{ + {[]byte("0123456789012345"), true, nil}, + {[]byte("\r012345678901234"), true, nil}, + {[]byte("\r"), false, nil}, + {nil, false, io.EOF}, + }}, +} + +func TestReadLineNewlines(t *testing.T) { + for _, e := range readLineNewlinesTests { + testReadLineNewlines(t, e.input, e.expect) + } +} + +func testReadLineNewlines(t *testing.T, input string, expect []readLineResult) { + b := NewReaderSize(strings.NewReader(input), minReadBufferSize) + for i, e := range expect { + line, isPrefix, err := b.ReadLine() + if !bytes.Equal(line, e.line) { + t.Errorf("%q call %d, line == %q, want %q", input, i, line, e.line) + return + } + if isPrefix != e.isPrefix { + t.Errorf("%q call %d, isPrefix == %v, want %v", input, i, isPrefix, e.isPrefix) + return + } + if err != e.err { + t.Errorf("%q call %d, err == %v, want %v", input, i, err, e.err) + return + } + } +} + +func createTestInput(n int) []byte { + input := make([]byte, n) + for i := range input { + // 101 and 251 are arbitrary prime numbers. + // The idea is to create an input sequence + // which doesn't repeat too frequently. 
+ input[i] = byte(i % 251) + if i%101 == 0 { + input[i] ^= byte(i / 101) + } + } + return input +} + +func TestReaderWriteTo(t *testing.T) { + input := createTestInput(8192) + r := NewReader(onlyReader{bytes.NewReader(input)}) + w := new(bytes.Buffer) + if n, err := r.WriteTo(w); err != nil || n != int64(len(input)) { + t.Fatalf("r.WriteTo(w) = %d, %v, want %d, nil", n, err, len(input)) + } + + for i, val := range w.Bytes() { + if val != input[i] { + t.Errorf("after write: out[%d] = %#x, want %#x", i, val, input[i]) + } + } +} + +type errorWriterToTest struct { + rn, wn int + rerr, werr error + expected error +} + +func (r errorWriterToTest) Read(p []byte) (int, error) { + return len(p) * r.rn, r.rerr +} + +func (w errorWriterToTest) Write(p []byte) (int, error) { + return len(p) * w.wn, w.werr +} + +var errorWriterToTests = []errorWriterToTest{ + {1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe}, + {0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe}, + {0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe}, + {0, 1, io.EOF, nil, nil}, +} + +func TestReaderWriteToErrors(t *testing.T) { + for i, rw := range errorWriterToTests { + r := NewReader(rw) + if _, err := r.WriteTo(rw); err != rw.expected { + t.Errorf("r.WriteTo(errorWriterToTests[%d]) = _, %v, want _,%v", i, err, rw.expected) + } + } +} + +func TestWriterReadFrom(t *testing.T) { + ws := []func(io.Writer) io.Writer{ + func(w io.Writer) io.Writer { return onlyWriter{w} }, + func(w io.Writer) io.Writer { return w }, + } + + rs := []func(io.Reader) io.Reader{ + iotest.DataErrReader, + func(r io.Reader) io.Reader { return r }, + } + + for ri, rfunc := range rs { + for wi, wfunc := range ws { + input := createTestInput(8192) + b := new(bytes.Buffer) + w := NewWriter(wfunc(b)) + r := rfunc(bytes.NewReader(input)) + if n, err := w.ReadFrom(r); err != nil || n != int64(len(input)) { + t.Errorf("ws[%d],rs[%d]: w.ReadFrom(r) = %d, %v, want %d, nil", wi, ri, n, err, len(input)) + continue + } + if err := w.Flush(); 
err != nil { + t.Errorf("Flush returned %v", err) + continue + } + if got, want := b.String(), string(input); got != want { + t.Errorf("ws[%d], rs[%d]:\ngot %q\nwant %q\n", wi, ri, got, want) + } + } + } +} + +type errorReaderFromTest struct { + rn, wn int + rerr, werr error + expected error +} + +func (r errorReaderFromTest) Read(p []byte) (int, error) { + return len(p) * r.rn, r.rerr +} + +func (w errorReaderFromTest) Write(p []byte) (int, error) { + return len(p) * w.wn, w.werr +} + +var errorReaderFromTests = []errorReaderFromTest{ + {0, 1, io.EOF, nil, nil}, + {1, 1, io.EOF, nil, nil}, + {0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe}, + {0, 0, io.ErrClosedPipe, io.ErrShortWrite, io.ErrClosedPipe}, + {1, 0, nil, io.ErrShortWrite, io.ErrShortWrite}, +} + +func TestWriterReadFromErrors(t *testing.T) { + for i, rw := range errorReaderFromTests { + w := NewWriter(rw) + if _, err := w.ReadFrom(rw); err != rw.expected { + t.Errorf("w.ReadFrom(errorReaderFromTests[%d]) = _, %v, want _,%v", i, err, rw.expected) + } + } +} + +// TestWriterReadFromCounts tests that using io.Copy to copy into a +// bufio.Writer does not prematurely flush the buffer. For example, when +// buffering writes to a network socket, excessive network writes should be +// avoided. 
+func TestWriterReadFromCounts(t *testing.T) { + var w0 writeCountingDiscard + b0 := NewWriterSize(&w0, 1234) + b0.WriteString(strings.Repeat("x", 1000)) + if w0 != 0 { + t.Fatalf("write 1000 'x's: got %d writes, want 0", w0) + } + b0.WriteString(strings.Repeat("x", 200)) + if w0 != 0 { + t.Fatalf("write 1200 'x's: got %d writes, want 0", w0) + } + io.Copy(b0, onlyReader{strings.NewReader(strings.Repeat("x", 30))}) + if w0 != 0 { + t.Fatalf("write 1230 'x's: got %d writes, want 0", w0) + } + io.Copy(b0, onlyReader{strings.NewReader(strings.Repeat("x", 9))}) + if w0 != 1 { + t.Fatalf("write 1239 'x's: got %d writes, want 1", w0) + } + + var w1 writeCountingDiscard + b1 := NewWriterSize(&w1, 1234) + b1.WriteString(strings.Repeat("x", 1200)) + b1.Flush() + if w1 != 1 { + t.Fatalf("flush 1200 'x's: got %d writes, want 1", w1) + } + b1.WriteString(strings.Repeat("x", 89)) + if w1 != 1 { + t.Fatalf("write 1200 + 89 'x's: got %d writes, want 1", w1) + } + io.Copy(b1, onlyReader{strings.NewReader(strings.Repeat("x", 700))}) + if w1 != 1 { + t.Fatalf("write 1200 + 789 'x's: got %d writes, want 1", w1) + } + io.Copy(b1, onlyReader{strings.NewReader(strings.Repeat("x", 600))}) + if w1 != 2 { + t.Fatalf("write 1200 + 1389 'x's: got %d writes, want 2", w1) + } + b1.Flush() + if w1 != 3 { + t.Fatalf("flush 1200 + 1389 'x's: got %d writes, want 3", w1) + } +} + +// A writeCountingDiscard is like ioutil.Discard and counts the number of times +// Write is called on it. +type writeCountingDiscard int + +func (w *writeCountingDiscard) Write(p []byte) (int, error) { + *w++ + return len(p), nil +} + +type negativeReader int + +func (r *negativeReader) Read([]byte) (int, error) { return -1, nil } + +func TestNegativeRead(t *testing.T) { + // should panic with a description pointing at the reader, not at itself. + // (should NOT panic with slice index error, for example.) 
+ b := NewReader(new(negativeReader)) + defer func() { + switch err := recover().(type) { + case nil: + t.Fatal("read did not panic") + case error: + if !strings.Contains(err.Error(), "reader returned negative count from Read") { + t.Fatalf("wrong panic: %v", err) + } + default: + t.Fatalf("unexpected panic value: %T(%v)", err, err) + } + }() + b.Read(make([]byte, 100)) +} + +var errFake = errors.New("fake error") + +type errorThenGoodReader struct { + didErr bool + nread int +} + +func (r *errorThenGoodReader) Read(p []byte) (int, error) { + r.nread++ + if !r.didErr { + r.didErr = true + return 0, errFake + } + return len(p), nil +} + +func TestReaderClearError(t *testing.T) { + r := &errorThenGoodReader{} + b := NewReader(r) + buf := make([]byte, 1) + if _, err := b.Read(nil); err != nil { + t.Fatalf("1st nil Read = %v; want nil", err) + } + if _, err := b.Read(buf); err != errFake { + t.Fatalf("1st Read = %v; want errFake", err) + } + if _, err := b.Read(nil); err != nil { + t.Fatalf("2nd nil Read = %v; want nil", err) + } + if _, err := b.Read(buf); err != nil { + t.Fatalf("3rd Read with buffer = %v; want nil", err) + } + if r.nread != 2 { + t.Errorf("num reads = %d; want 2", r.nread) + } +} + +// Test for golang.org/issue/5947 +func TestWriterReadFromWhileFull(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriterSize(buf, 10) + + // Fill buffer exactly. + n, err := w.Write([]byte("0123456789")) + if n != 10 || err != nil { + t.Fatalf("Write returned (%v, %v), want (10, nil)", n, err) + } + + // Use ReadFrom to read in some data. 
+ n2, err := w.ReadFrom(strings.NewReader("abcdef")) + if n2 != 6 || err != nil { + t.Fatalf("ReadFrom returned (%v, %v), want (6, nil)", n2, err) + } +} + +type emptyThenNonEmptyReader struct { + r io.Reader + n int +} + +func (r *emptyThenNonEmptyReader) Read(p []byte) (int, error) { + if r.n <= 0 { + return r.r.Read(p) + } + r.n-- + return 0, nil +} + +// Test for golang.org/issue/7611 +func TestWriterReadFromUntilEOF(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriterSize(buf, 5) + + // Partially fill buffer + n, err := w.Write([]byte("0123")) + if n != 4 || err != nil { + t.Fatalf("Write returned (%v, %v), want (4, nil)", n, err) + } + + // Use ReadFrom to read in some data. + r := &emptyThenNonEmptyReader{r: strings.NewReader("abcd"), n: 3} + n2, err := w.ReadFrom(r) + if n2 != 4 || err != nil { + t.Fatalf("ReadFrom returned (%v, %v), want (4, nil)", n2, err) + } + w.Flush() + if got, want := string(buf.Bytes()), "0123abcd"; got != want { + t.Fatalf("buf.Bytes() returned %q, want %q", got, want) + } +} + +func TestWriterReadFromErrNoProgress(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriterSize(buf, 5) + + // Partially fill buffer + n, err := w.Write([]byte("0123")) + if n != 4 || err != nil { + t.Fatalf("Write returned (%v, %v), want (4, nil)", n, err) + } + + // Use ReadFrom to read in some data. 
+ r := &emptyThenNonEmptyReader{r: strings.NewReader("abcd"), n: 100} + n2, err := w.ReadFrom(r) + if n2 != 0 || err != io.ErrNoProgress { + t.Fatalf("buf.Bytes() returned (%v, %v), want (0, io.ErrNoProgress)", n2, err) + } +} + +func TestReaderReset(t *testing.T) { + r := NewReader(strings.NewReader("foo foo")) + buf := make([]byte, 3) + r.Read(buf) + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + r.Reset(strings.NewReader("bar bar")) + all, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(all) != "bar bar" { + t.Errorf("ReadAll = %q; want bar bar", all) + } +} + +func TestWriterReset(t *testing.T) { + var buf1, buf2 bytes.Buffer + w := NewWriter(&buf1) + w.WriteString("foo") + w.Reset(&buf2) // and not flushed + w.WriteString("bar") + w.Flush() + if buf1.String() != "" { + t.Errorf("buf1 = %q; want empty", buf1.String()) + } + if buf2.String() != "bar" { + t.Errorf("buf2 = %q; want bar", buf2.String()) + } +} + +// An onlyReader only implements io.Reader, no matter what other methods the underlying implementation may have. +type onlyReader struct { + io.Reader +} + +// An onlyWriter only implements io.Writer, no matter what other methods the underlying implementation may have. 
+type onlyWriter struct { + io.Writer +} + +func BenchmarkReaderCopyOptimal(b *testing.B) { + // Optimal case is where the underlying reader implements io.WriterTo + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := NewReader(srcBuf) + dstBuf := new(bytes.Buffer) + dst := onlyWriter{dstBuf} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + src.Reset(srcBuf) + dstBuf.Reset() + io.Copy(dst, src) + } +} + +func BenchmarkReaderCopyUnoptimal(b *testing.B) { + // Unoptimal case is where the underlying reader doesn't implement io.WriterTo + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := NewReader(onlyReader{srcBuf}) + dstBuf := new(bytes.Buffer) + dst := onlyWriter{dstBuf} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + src.Reset(onlyReader{srcBuf}) + dstBuf.Reset() + io.Copy(dst, src) + } +} + +func BenchmarkReaderCopyNoWriteTo(b *testing.B) { + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + srcReader := NewReader(srcBuf) + src := onlyReader{srcReader} + dstBuf := new(bytes.Buffer) + dst := onlyWriter{dstBuf} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + srcReader.Reset(srcBuf) + dstBuf.Reset() + io.Copy(dst, src) + } +} + +func BenchmarkReaderWriteToOptimal(b *testing.B) { + const bufSize = 16 << 10 + buf := make([]byte, bufSize) + r := bytes.NewReader(buf) + srcReader := NewReaderSize(onlyReader{r}, 1<<10) + if _, ok := ioutil.Discard.(io.ReaderFrom); !ok { + b.Fatal("ioutil.Discard doesn't support ReaderFrom") + } + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + srcReader.Reset(onlyReader{r}) + n, err := srcReader.WriteTo(ioutil.Discard) + if err != nil { + b.Fatal(err) + } + if n != bufSize { + b.Fatalf("n = %d; want %d", n, bufSize) + } + } +} + +func BenchmarkWriterCopyOptimal(b *testing.B) { + // Optimal case is where the underlying writer implements io.ReaderFrom + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := onlyReader{srcBuf} + dstBuf := new(bytes.Buffer) + dst := NewWriter(dstBuf) + for i := 0; i < b.N; i++ { + srcBuf.Reset() + 
dstBuf.Reset() + dst.Reset(dstBuf) + io.Copy(dst, src) + } +} + +func BenchmarkWriterCopyUnoptimal(b *testing.B) { + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := onlyReader{srcBuf} + dstBuf := new(bytes.Buffer) + dst := NewWriter(onlyWriter{dstBuf}) + for i := 0; i < b.N; i++ { + srcBuf.Reset() + dstBuf.Reset() + dst.Reset(onlyWriter{dstBuf}) + io.Copy(dst, src) + } +} + +func BenchmarkWriterCopyNoReadFrom(b *testing.B) { + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := onlyReader{srcBuf} + dstBuf := new(bytes.Buffer) + dstWriter := NewWriter(dstBuf) + dst := onlyWriter{dstWriter} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + dstBuf.Reset() + dstWriter.Reset(dstBuf) + io.Copy(dst, src) + } +} + +func BenchmarkReaderEmpty(b *testing.B) { + b.ReportAllocs() + str := strings.Repeat("x", 16<<10) + for i := 0; i < b.N; i++ { + br := NewReader(strings.NewReader(str)) + n, err := io.Copy(ioutil.Discard, br) + if err != nil { + b.Fatal(err) + } + if n != int64(len(str)) { + b.Fatal("wrong length") + } + } +} + +func BenchmarkWriterEmpty(b *testing.B) { + b.ReportAllocs() + str := strings.Repeat("x", 1<<10) + bs := []byte(str) + for i := 0; i < b.N; i++ { + bw := NewWriter(ioutil.Discard) + bw.Flush() + bw.WriteByte('a') + bw.Flush() + bw.WriteRune('B') + bw.Flush() + bw.Write(bs) + bw.Flush() + bw.WriteString(str) + bw.Flush() + } +} + +func BenchmarkWriterFlush(b *testing.B) { + b.ReportAllocs() + bw := NewWriter(ioutil.Discard) + str := strings.Repeat("x", 50) + for i := 0; i < b.N; i++ { + bw.WriteString(str) + bw.Flush() + } +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go new file mode 100644 index 000000000..16629d022 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bufio + +func (b *Buffer) Cap() int { + return cap(b.buf) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore new file mode 100644 index 000000000..4cd0cbaf4 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml new file mode 100644 index 000000000..67467e140 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml @@ -0,0 +1,15 @@ +sudo: false +language: go + +go: + - 1.4.1 + +before_script: + - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi + +os: + - linux + - osx + +notifications: + email: false diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS new file mode 100644 index 000000000..4e0e8284e --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS @@ -0,0 +1,34 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. 
+ +Adrien Bustany +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Dave Cheney +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Matt Layher +Nathan Youngman +Paul Hammond +Pieter Droogendijk +Pursuit92 +Rob Figueiredo +Soge Zhang +Tilak Sharma +Travis Cline +Tudor Golubenco +Yukang +bronze1man +debrando +henrikedwards diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md new file mode 100644 index 000000000..ea9428a2a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md @@ -0,0 +1,263 @@ +# Changelog + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. 
+* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. 
+* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched 
directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: 
https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 + diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md new file mode 100644 index 000000000..0f377f341 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). 
+ +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, OS X and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. 
You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE new file mode 100644 index 000000000..f21e54080 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace new file mode 100644 index 000000000..e69de29bb diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml new file mode 100644 index 000000000..204217fb0 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml @@ -0,0 +1,26 @@ +## OS X build (CircleCI iOS beta) + +# Pretend like it's an Xcode project, at least to get it running. 
+machine: + environment: + XCODE_WORKSPACE: NotUsed.xcworkspace + XCODE_SCHEME: NotUsed + # This is where the go project is actually checked out to: + CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify + +dependencies: + pre: + - brew upgrade go + +test: + override: + - go test ./... + +# Idealized future config, eventually with cross-platform build matrix :-) + +# machine: +# go: +# version: 1.4 +# os: +# - osx +# - linux diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go new file mode 100644 index 000000000..306379660 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +package fsnotify_test + +import ( + "log" + + "github.com/go-fsnotify/fsnotify" +) + +func ExampleNewWatcher() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event := <-watcher.Events: + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err := <-watcher.Errors: + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go new file mode 100644 index 000000000..c899ee008 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go @@ -0,0 +1,62 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9,!solaris + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if e.Op&Create == Create { + buffer.WriteString("|CREATE") + } + if e.Op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if e.Op&Write == Write { + buffer.WriteString("|WRITE") + } + if e.Op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if e.Op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + + // If buffer remains empty, return no event names + if buffer.Len() == 0 { + return fmt.Sprintf("%q: ", e.Name) + } + + // Return a list of event names, with leading pipe character stripped + return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:]) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go new file mode 100644 index 000000000..d7759ec8c --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go @@ -0,0 +1,306 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := syscall.InotifyInit() + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM | + syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY | + syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + watchEntry, found := w.watches[name] + w.mu.Unlock() + if found { + watchEntry.flags |= flags + flags |= syscall.IN_MASK_ADD + } + wd, errno := syscall.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + w.mu.Lock() + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + w.mu.Unlock() + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // That means we can safely delete it from our watches, whatever inotify_rm_watch does. + delete(w.watches, name) + success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
+ return errno + } + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer syscall.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = syscall.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == syscall.EINTR { + continue + } + + // syscall.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < syscall.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occured while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... 
+ for offset <= uint32(n-syscall.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name := w.paths[int(raw.Wd)] + w.mu.Unlock() + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += syscall.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&syscall.IN_IGNORED == syscall.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. 
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO { + e.Op |= Create + } + if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE { + e.Op |= Remove + } + if mask&syscall.IN_MODIFY == syscall.IN_MODIFY { + e.Op |= Write + } + if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go new file mode 100644 index 000000000..3b4178404 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go @@ -0,0 +1,186 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "syscall" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. 
+func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = syscall.EpollCreate(1) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := syscall.EpollEvent{ + Fd: int32(poller.fd), + Events: syscall.EPOLLIN, + } + errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = syscall.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: syscall.EPOLLIN, + } + errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]syscall.EpollEvent, 7) + for { + n, errno := syscall.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == syscall.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&syscall.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&syscall.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let syscall.Read pick up the error. + epollerr = true + } + if event.Events&syscall.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&syscall.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&syscall.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&syscall.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := syscall.Write(poller.pipe[1], buf) + if n == -1 { + if errno == syscall.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := syscall.Read(poller.pipe[0], buf) + if n == -1 { + if errno == syscall.EAGAIN { + // Buffer is empty, someone else cleared our wake. 
+ return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + syscall.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + syscall.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + syscall.Close(poller.epfd) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go new file mode 100644 index 000000000..af9f407f8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go @@ -0,0 +1,228 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "syscall" + "testing" + "time" +) + +type testFd [2]int + +func makeTestFd(t *testing.T) testFd { + var tfd testFd + errno := syscall.Pipe(tfd[:]) + if errno != nil { + t.Fatalf("Failed to create pipe: %v", errno) + } + return tfd +} + +func (tfd testFd) fd() int { + return tfd[0] +} + +func (tfd testFd) closeWrite(t *testing.T) { + errno := syscall.Close(tfd[1]) + if errno != nil { + t.Fatalf("Failed to close write end of pipe: %v", errno) + } +} + +func (tfd testFd) put(t *testing.T) { + buf := make([]byte, 10) + _, errno := syscall.Write(tfd[1], buf) + if errno != nil { + t.Fatalf("Failed to write to pipe: %v", errno) + } +} + +func (tfd testFd) get(t *testing.T) { + buf := make([]byte, 10) + _, errno := syscall.Read(tfd[0], buf) + if errno != nil { + t.Fatalf("Failed to read from pipe: %v", errno) + } +} + +func (tfd testFd) close() { + syscall.Close(tfd[1]) + syscall.Close(tfd[0]) +} + +func makePoller(t *testing.T) (testFd, *fdPoller) { + tfd := makeTestFd(t) + poller, err := newFdPoller(tfd.fd()) + if err != nil { + t.Fatalf("Failed to create poller: %v", err) + } + return tfd, poller +} + +func 
TestPollerWithBadFd(t *testing.T) { + _, err := newFdPoller(-1) + if err != syscall.EBADF { + t.Fatalf("Expected EBADF, got: %v", err) + } +} + +func TestPollerWithData(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.put(t) + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + tfd.get(t) +} + +func TestPollerWithWakeup(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if ok { + t.Fatalf("expected poller to return false") + } +} + +func TestPollerWithClose(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.closeWrite(t) + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } +} + +func TestPollerWithWakeupAndData(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.put(t) + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + + // both data and wakeup + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + + // data is still in the buffer, wakeup is cleared + ok, err = poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + + tfd.get(t) + // data is gone, only wakeup now + err = poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + ok, err = poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if ok { + t.Fatalf("expected poller to return false") + } +} + +func TestPollerConcurrent(t *testing.T) { + tfd, 
poller := makePoller(t) + defer tfd.close() + defer poller.close() + + oks := make(chan bool) + live := make(chan bool) + defer close(live) + go func() { + defer close(oks) + for { + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + oks <- ok + if !<-live { + return + } + } + }() + + // Try a write + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + tfd.put(t) + if !<-oks { + t.Fatalf("expected true") + } + tfd.get(t) + live <- true + + // Try a wakeup + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + if <-oks { + t.Fatalf("expected false") + } + live <- true + + // Try a close + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + tfd.closeWrite(t) + if !<-oks { + t.Fatalf("expected true") + } + tfd.get(t) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go new file mode 100644 index 000000000..035ee8f95 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go @@ -0,0 +1,292 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "os" + "path/filepath" + "syscall" + "testing" + "time" +) + +func TestInotifyCloseRightAway(t *testing.T) { + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + // Close immediately; it won't even reach the first syscall.Read. + w.Close() + + // Wait for the close to complete. 
+ <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseSlightlyLater(t *testing.T) { + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + // Wait until readEvents has reached syscall.Read, and Close. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + w.Add(testDir) + + // Wait until readEvents has reached syscall.Read, and Close. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseAfterRead(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add .") + } + + // Generate an event. + os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) + + // Wait for readEvents to read the event, then close the watcher. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. 
+ <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func isWatcherReallyClosed(t *testing.T, w *Watcher) { + select { + case err, ok := <-w.Errors: + if ok { + t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) + } + default: + t.Fatalf("w.Errors would have blocked; readEvents is still alive!") + } + + select { + case _, ok := <-w.Events: + if ok { + t.Fatalf("w.Events is not closed; readEvents is still alive after closing") + } + default: + t.Fatalf("w.Events would have blocked; readEvents is still alive!") + } +} + +func TestInotifyCloseCreate(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add testDir: %v", err) + } + h, err := os.Create(filepath.Join(testDir, "testfile")) + if err != nil { + t.Fatalf("Failed to create file in testdir: %v", err) + } + h.Close() + select { + case _ = <-w.Events: + case err := <-w.Errors: + t.Fatalf("Error from watcher: %v", err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("Took too long to wait for event") + } + + // At this point, we've received one event, so the goroutine is ready. + // It's also blocking on syscall.Read. + // Now we try to swap the file descriptor under its nose. 
+ w.Close() + w, err = NewWatcher() + defer w.Close() + if err != nil { + t.Fatalf("Failed to create second watcher: %v", err) + } + + <-time.After(50 * time.Millisecond) + err = w.Add(testDir) + if err != nil { + t.Fatalf("Error adding testDir again: %v", err) + } +} + +func TestInotifyStress(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFile := filepath.Join(testDir, "testfile") + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + killchan := make(chan struct{}) + defer close(killchan) + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add testDir: %v", err) + } + + proc, err := os.FindProcess(os.Getpid()) + if err != nil { + t.Fatalf("Error finding process: %v", err) + } + + go func() { + for { + select { + case <-time.After(5 * time.Millisecond): + err := proc.Signal(syscall.SIGUSR1) + if err != nil { + t.Fatalf("Signal failed: %v", err) + } + case <-killchan: + return + } + } + }() + + go func() { + for { + select { + case <-time.After(11 * time.Millisecond): + err := w.poller.wake() + if err != nil { + t.Fatalf("Wake failed: %v", err) + } + case <-killchan: + return + } + } + }() + + go func() { + for { + select { + case <-killchan: + return + default: + handle, err := os.Create(testFile) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + handle.Close() + time.Sleep(time.Millisecond) + err = os.Remove(testFile) + if err != nil { + t.Fatalf("Remove failed: %v", err) + } + } + } + }() + + creates := 0 + removes := 0 + after := time.After(5 * time.Second) + for { + select { + case <-after: + if creates-removes > 1 || creates-removes < -1 { + t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes) + } + if creates < 50 { + t.Fatalf("Expected at least 50 creates, got %d", creates) + } + return + case err := <-w.Errors: + t.Fatalf("Got an error from watcher: %v", err) + case evt := <-w.Events: 
+ if evt.Name != testFile { + t.Fatalf("Got an event for an unknown file: %s", evt.Name) + } + if evt.Op == Create { + creates++ + } + if evt.Op == Remove { + removes++ + } + } + } +} + +func TestInotifyRemoveTwice(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFile := filepath.Join(testDir, "testfile") + + handle, err := os.Create(testFile) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + handle.Close() + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testFile) + if err != nil { + t.Fatalf("Failed to add testFile: %v", err) + } + + err = os.Remove(testFile) + if err != nil { + t.Fatalf("Failed to remove testFile: %v", err) + } + + err = w.Remove(testFile) + if err != syscall.EINVAL { + t.Fatalf("Expected EINVAL from Remove, got: %v", err) + } + + err = w.Remove(testFile) + if err == syscall.EINVAL { + t.Fatalf("Got EINVAL again, watch was not removed") + } +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go new file mode 100644 index 000000000..59169c6af --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go @@ -0,0 +1,1135 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9,!solaris + +package fsnotify + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "sync/atomic" + "testing" + "time" +) + +// An atomic counter +type counter struct { + val int32 +} + +func (c *counter) increment() { + atomic.AddInt32(&c.val, 1) +} + +func (c *counter) value() int32 { + return atomic.LoadInt32(&c.val) +} + +func (c *counter) reset() { + atomic.StoreInt32(&c.val, 0) +} + +// tempMkdir makes a temporary directory +func tempMkdir(t *testing.T) string { + dir, err := ioutil.TempDir("", "fsnotify") + if err != nil { + t.Fatalf("failed to create test directory: %s", err) + } + return dir +} + +// newWatcher initializes an fsnotify Watcher instance. +func newWatcher(t *testing.T) *Watcher { + watcher, err := NewWatcher() + if err != nil { + t.Fatalf("NewWatcher() failed: %s", err) + } + return watcher +} + +// addWatch adds a watch for a directory +func addWatch(t *testing.T, watcher *Watcher, dir string) { + if err := watcher.Add(dir); err != nil { + t.Fatalf("watcher.Add(%q) failed: %s", dir, err) + } +} + +func TestFsnotifyMultipleOperations(t *testing.T) { + watcher := newWatcher(t) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory that's not watched + testDirToMoveFiles := tempMkdir(t) + defer os.RemoveAll(testDirToMoveFiles) + + testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") + testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") + + addWatch(t, watcher, testDir) + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived, renameReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant 
events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Rename == Rename { + renameReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // Modify the file outside of the watched dir + f, err = os.Open(testFileRenamed) + if err != nil { + t.Fatalf("open test renamed file failed: %s", err) + } + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Recreate the file that was moved + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Close() + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + mReceived := modifyReceived.value() + if mReceived != 1 { + t.Fatalf("incorrect number of modify events 
received after 500 ms (%d vs %d)", mReceived, 1) + } + dReceived := deleteReceived.value() + rReceived := renameReceived.value() + if dReceived+rReceived != 1 { + t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyMultipleCreates(t *testing.T) { + watcher := newWatcher(t) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") + + addWatch(t, watcher, testDir) + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + 
f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + os.Remove(testFile) + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Recreate the file + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Close() + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Modify + f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Modify + f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + mReceived := modifyReceived.value() + if mReceived < 3 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3) + } + dReceived := deleteReceived.value() + if dReceived != 1 { + t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case 
<-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyDirOnly(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + // This should NOT add any events to the fsnotify event queue + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + 
time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + os.Remove(testFile) + os.Remove(testFileAlreadyExists) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 1 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1) + } + mReceived := modifyReceived.value() + if mReceived != 1 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) + } + dReceived := deleteReceived.value() + if dReceived != 2 { + t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyDeleteWatchedDir(t *testing.T) { + watcher := newWatcher(t) + defer watcher.Close() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + addWatch(t, watcher, testDir) + + // Add a watch for testFile + addWatch(t, watcher, testFileAlreadyExists) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var deleteReceived counter + 
go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + }() + + os.RemoveAll(testDir) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + dReceived := deleteReceived.value() + if dReceived < 2 { + t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) + } +} + +func TestFsnotifySubDir(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") + testSubDir := filepath.Join(testDir, "sub") + testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { + t.Logf("event received: %s", event) + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Remove == Remove { + deleteReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + addWatch(t, watcher, testDir) + + // Create sub-directory + if err := os.Mkdir(testSubDir, 0777); err != nil { + t.Fatalf("failed to create test 
sub-directory: %s", err) + } + + // Create a file + var f *os.File + f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + // Create a file (Should not see this! we are not watching subdir) + var fs *os.File + fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + fs.Sync() + fs.Close() + + time.Sleep(200 * time.Millisecond) + + // Make sure receive deletes for both file and sub-directory + os.RemoveAll(testSubDir) + os.Remove(testFile1) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + dReceived := deleteReceived.value() + if dReceived != 2 { + t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyRename(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Receive events on the event channel on a separate goroutine + eventstream := 
watcher.Events + var renameReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { + if event.Op&Rename == Rename { + renameReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + f.WriteString("data") + f.Sync() + f.Close() + + // Add a watch for testFile + addWatch(t, watcher, testFile) + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if renameReceived.value() == 0 { + t.Fatal("fsnotify rename events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestFsnotifyRenameToCreate(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory to get file + testDirFrom := tempMkdir(t) + defer os.RemoveAll(testDirFrom) + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + 
}() + + testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { + if event.Op&Create == Create { + createReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if createReceived.value() == 0 { + t.Fatal("fsnotify create events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestFsnotifyRenameToOverwrite(t *testing.T) { + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer 
os.RemoveAll(testDir) + + // Create directory to get file + testDirFrom := tempMkdir(t) + defer os.RemoveAll(testDirFrom) + + testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Create a file + var fr *os.File + fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + fr.Sync() + fr.Close() + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var eventReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testFileRenamed) { + eventReceived.increment() + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if eventReceived.value() == 0 { + t.Fatal("fsnotify events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event 
stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestRemovalOfWatch(t *testing.T) { + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + watcher := newWatcher(t) + defer watcher.Close() + + addWatch(t, watcher, testDir) + if err := watcher.Remove(testDir); err != nil { + t.Fatalf("Could not remove the watch: %v\n", err) + } + + go func() { + select { + case ev := <-watcher.Events: + t.Fatalf("We received event: %v\n", ev) + case <-time.After(500 * time.Millisecond): + t.Log("No event received, as expected.") + } + }() + + time.Sleep(200 * time.Millisecond) + // Modify the file outside of the watched dir + f, err := os.Open(testFileAlreadyExists) + if err != nil { + t.Fatalf("Open test file failed: %s", err) + } + f.WriteString("data") + f.Sync() + f.Close() + if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { + t.Fatalf("chmod failed: %s", err) + } + time.Sleep(400 * time.Millisecond) +} + +func TestFsnotifyAttrib(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("attributes don't work on Windows.") + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + // The modifyReceived counter counts IsModify events that are not IsAttrib, + // and the 
attribReceived counts IsAttrib events (which are also IsModify as + // a consequence). + var modifyReceived counter + var attribReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Chmod == Chmod { + attribReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + f.WriteString("data") + f.Sync() + f.Close() + + // Add a watch for testFile + addWatch(t, watcher, testFile) + + if err := os.Chmod(testFile, 0700); err != nil { + t.Fatalf("chmod failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here + time.Sleep(500 * time.Millisecond) + if modifyReceived.value() != 0 { + t.Fatal("received an unexpected modify event when creating a test file") + } + if attribReceived.value() == 0 { + t.Fatal("fsnotify attribute events have not received after 500 ms") + } + + // Modifying the contents of the file does not set the attrib flag (although eg. the mtime + // might have been modified). 
+ modifyReceived.reset() + attribReceived.reset() + + f, err = os.OpenFile(testFile, os.O_WRONLY, 0) + if err != nil { + t.Fatalf("reopening test file failed: %s", err) + } + + f.WriteString("more data") + f.Sync() + f.Close() + + time.Sleep(500 * time.Millisecond) + + if modifyReceived.value() != 1 { + t.Fatal("didn't receive a modify event after changing test file contents") + } + + if attribReceived.value() != 0 { + t.Fatal("did receive an unexpected attrib event after changing test file contents") + } + + modifyReceived.reset() + attribReceived.reset() + + // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents + // of the file are not changed though) + if err := os.Chmod(testFile, 0600); err != nil { + t.Fatalf("chmod failed: %s", err) + } + + time.Sleep(500 * time.Millisecond) + + if attribReceived.value() != 1 { + t.Fatal("didn't receive an attribute change after 500ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(1e9): + t.Fatal("event stream was not closed after 1 second") + } + + os.Remove(testFile) +} + +func TestFsnotifyClose(t *testing.T) { + watcher := newWatcher(t) + watcher.Close() + + var done int32 + go func() { + watcher.Close() + atomic.StoreInt32(&done, 1) + }() + + time.Sleep(50e6) // 50 ms + if atomic.LoadInt32(&done) == 0 { + t.Fatal("double Close() test failed: second Close() call didn't return") + } + + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + if err := watcher.Add(testDir); err == nil { + t.Fatal("expected error on Watch() after Close(), got nil") + } +} + +func TestFsnotifyFakeSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("symlinks don't work on Windows.") + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + 
var errorsReceived counter + // Receive errors on the error channel on a separate goroutine + go func() { + for errors := range watcher.Errors { + t.Logf("Received error: %s", errors) + errorsReceived.increment() + } + }() + + // Count the CREATE events received + var createEventsReceived, otherEventsReceived counter + go func() { + for ev := range watcher.Events { + t.Logf("event received: %s", ev) + if ev.Op&Create == Create { + createEventsReceived.increment() + } else { + otherEventsReceived.increment() + } + } + }() + + addWatch(t, watcher, testDir) + + if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { + t.Fatalf("Failed to create bogus symlink: %s", err) + } + t.Logf("Created bogus symlink") + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + + // Should not be error, just no events for broken links (watching nothing) + if errorsReceived.value() > 0 { + t.Fatal("fsnotify errors have been received.") + } + if otherEventsReceived.value() > 0 { + t.Fatal("fsnotify other events received on the broken link") + } + + // Except for 1 create event (for the link itself) + if createEventsReceived.value() == 0 { + t.Fatal("fsnotify create events were not received after 500 ms") + } + if createEventsReceived.value() > 1 { + t.Fatal("fsnotify more create events received than expected") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() +} + +// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. 
+// See https://codereview.appspot.com/103300045/ +// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race +func TestConcurrentRemovalOfWatch(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip("regression test for race only present on darwin") + } + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + watcher := newWatcher(t) + defer watcher.Close() + + addWatch(t, watcher, testDir) + + // Test that RemoveWatch can be invoked concurrently, with no data races. + removed1 := make(chan struct{}) + go func() { + defer close(removed1) + watcher.Remove(testDir) + }() + removed2 := make(chan struct{}) + go func() { + close(removed2) + watcher.Remove(testDir) + }() + <-removed1 + <-removed2 +} + +func TestClose(t *testing.T) { + // Regression test for #59 bad file descriptor from Close + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + watcher := newWatcher(t) + if err := watcher.Add(testDir); err != nil { + t.Fatalf("Expected no error on Add, got %v", err) + } + err := watcher.Close() + if err != nil { + t.Fatalf("Expected no error on Close, got %v.", err) + } +} + +func testRename(file1, file2 string) error { + switch runtime.GOOS { + case "windows", "plan9": + return os.Rename(file1, file2) + default: + cmd := exec.Command("mv", file1, file2) + return cmd.Run() + } +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go new file mode 100644 index 000000000..265622d20 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go @@ -0,0 +1,463 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + "time" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan bool // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan bool), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + w.mu.Lock() + ws := w.watches + w.mu.Unlock() + + var err error + for name := range ws { + if e := w.Remove(name); e != nil && err == nil { + err = e + } + } + + // Send "quit" message to the reader goroutine: + w.done <- true + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + return w.addWatch(name, noteAllEvents) +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = syscall.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + syscall.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. 
+ w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +func (w *Watcher) addWatch(name string, flags uint32) error { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return nil + } + + fi, err = os.Lstat(name) + if err != nil { + return nil + } + } + + watchfd, err = syscall.Open(name, openMode, 0700) + if watchfd == -1 { + return err + } + + isDir = fi.IsDir() + } + + const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + syscall.Close(watchfd) + return err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return err + } + } + } + return nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]syscall.Kevent_t, 10) + + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + err := syscall.Close(w.kq) + if err != nil { + w.Errors <- err + } + close(w.Events) + close(w.Errors) + return + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. 
+ if err != nil && err != syscall.EINTR { + w.Errors <- err + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel + w.Events <- event + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + fileDir, _ := filepath.Split(event.Name) + fileDir = filepath.Clean(fileDir) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); os.IsExist(err) { + w.sendDirectoryChangeEvents(fileDir) + // FIXME: should this be for events on files or just isDir? + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. 
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE { + e.Op |= Remove + } + if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE { + e.Op |= Write + } + if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME { + e.Op |= Rename + } + if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + if err := w.internalWatch(filePath, fileInfo); err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + w.Errors <- err + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + w.Events <- newCreateEvent(filePath) + } + + // like watchDirectoryFiles (but without doing another ReadDir) + if err := w.internalWatch(filePath, fileInfo); err != nil { + return + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= syscall.NOTE_DELETE + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = syscall.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]syscall.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := syscall.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. 
+func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) { + n, err := syscall.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) syscall.Timespec { + return syscall.NsecToTimespec(d.Nanoseconds()) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go new file mode 100644 index 000000000..c57ccb427 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "syscall" + +const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go new file mode 100644 index 000000000..174b2c331 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "syscall" + +// note: this constant is not defined on BSD +const openMode = syscall.O_EVTONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go new file mode 100644 index 000000000..811585227 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sys_FS_ALL_EVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sys_FS_ONESHOT = 0x80000000 + sys_FS_ONLYDIR = 0x1000000 + + // Events + sys_FS_ACCESS = 0x1 + sys_FS_ALL_EVENTS = 0xfff + sys_FS_ATTRIB = 0x4 + sys_FS_CLOSE = 0x18 + sys_FS_CREATE = 0x100 + sys_FS_DELETE = 0x200 + sys_FS_DELETE_SELF = 0x400 + sys_FS_MODIFY = 0x2 + sys_FS_MOVE = 0xc0 + sys_FS_MOVED_FROM = 0x40 + sys_FS_MOVED_TO = 0x80 + sys_FS_MOVE_SELF = 0x800 + + // Special events + sys_FS_IGNORED = 0x8000 + sys_FS_Q_OVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO { + e.Op |= Create + } + if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF { + e.Op |= Remove + } + if mask&sys_FS_MODIFY == sys_FS_MODIFY { + e.Op |= Write + } + if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM { + e.Op |= Rename + } + if mask&sys_FS_ATTRIB == sys_FS_ATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + 
+func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sys_FS_ONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. 
+func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) { + if watch.mask&sys_FS_ONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sys_FS_Q_OVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := watch.path + "\\" + name + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sys_FS_DELETE_SELF + case syscall.FILE_ACTION_MODIFIED: + mask = sys_FS_MODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sys_FS_MOVE_SELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sys_FS_ONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sys_FS_ONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = watch.path + "\\" + watch.rename + sendNameEvent() + } + + // Move to the next event in the buffer 
+ if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sys_FS_ACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sys_FS_MODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sys_FS_ATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sys_FS_CREATE + case syscall.FILE_ACTION_REMOVED: + return sys_FS_DELETE + case syscall.FILE_ACTION_MODIFIED: + return sys_FS_MODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sys_FS_MOVED_FROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sys_FS_MOVED_TO + } + return 0 +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml b/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml new file mode 100644 index 000000000..c3cf4b8a6 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml @@ -0,0 +1,19 @@ +language: go + +services: +- redis-server + +go: + - 1.1 + - 1.2 + - 1.3 + - tip + +install: + - go get gopkg.in/bufio.v1 + - go get gopkg.in/check.v1 + - mkdir -p $HOME/gopath/src/gopkg.in + - ln -s `pwd` $HOME/gopath/src/gopkg.in/redis.v2 + +before_script: + - redis-server testdata/sentinel.conf --sentinel & diff --git 
a/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE new file mode 100644 index 000000000..6855a95fe --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Redis Go Client Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile b/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile new file mode 100644 index 000000000..b250d9bfa --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile @@ -0,0 +1,3 @@ +all: + go test gopkg.in/redis.v2 -cpu=1,2,4 + go test gopkg.in/redis.v2 -short -race diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/command.go b/Godeps/_workspace/src/gopkg.in/redis.v2/command.go new file mode 100644 index 000000000..d7c76cf92 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/command.go @@ -0,0 +1,597 @@ +package redis + +import ( + "fmt" + "strconv" + "strings" + "time" + + "gopkg.in/bufio.v1" +) + +var ( + _ Cmder = (*Cmd)(nil) + _ Cmder = (*SliceCmd)(nil) + _ Cmder = (*StatusCmd)(nil) + _ Cmder = (*IntCmd)(nil) + _ Cmder = (*DurationCmd)(nil) + _ Cmder = (*BoolCmd)(nil) + _ Cmder = (*StringCmd)(nil) + _ Cmder = (*FloatCmd)(nil) + _ Cmder = (*StringSliceCmd)(nil) + _ Cmder = (*BoolSliceCmd)(nil) + _ Cmder = (*StringStringMapCmd)(nil) + _ Cmder = (*ZSliceCmd)(nil) + _ Cmder = (*ScanCmd)(nil) +) + +type Cmder interface { + args() []string + parseReply(*bufio.Reader) error + setErr(error) + + writeTimeout() *time.Duration + readTimeout() *time.Duration + + Err() error + String() string +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + cmd.setErr(e) + } +} + +func cmdString(cmd Cmder, val interface{}) string { + s := strings.Join(cmd.args(), " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + return s + ": " + fmt.Sprint(val) + } + return s + +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []string + + err error + + _writeTimeout, _readTimeout *time.Duration +} + +func newBaseCmd(args ...string) *baseCmd { + return &baseCmd{ + _args: args, + } +} + +func (cmd *baseCmd) Err() error { + if cmd.err != nil { + return cmd.err + } + return nil +} + +func 
(cmd *baseCmd) args() []string { + return cmd._args +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) writeTimeout() *time.Duration { + return cmd._writeTimeout +} + +func (cmd *baseCmd) setWriteTimeout(d time.Duration) { + cmd._writeTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + *baseCmd + + val interface{} +} + +func NewCmd(args ...string) *Cmd { + return &Cmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) parseReply(rd *bufio.Reader) error { + cmd.val, cmd.err = parseReply(rd, parseSlice) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + *baseCmd + + val []interface{} +} + +func NewSliceCmd(args ...string) *SliceCmd { + return &SliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + *baseCmd + + val string +} + +func NewStatusCmd(args ...string) *StatusCmd { + return &StatusCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StatusCmd) Val() string { + return 
cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(string) + return nil +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + *baseCmd + + val int64 +} + +func NewIntCmd(args ...string) *IntCmd { + return &IntCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(int64) + return nil +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + *baseCmd + + val time.Duration + precision time.Duration +} + +func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd { + return &DurationCmd{ + baseCmd: newBaseCmd(args...), + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = time.Duration(v.(int64)) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + *baseCmd + + val bool +} + +func NewBoolCmd(args ...string) *BoolCmd { + return &BoolCmd{ + baseCmd: 
newBaseCmd(args...), + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(int64) == 1 + return nil +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + *baseCmd + + val string +} + +func NewStringCmd(args ...string) *StringCmd { + return &StringCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.val, 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.val, 10, 64) +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.val, 64) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(string) + return nil +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + *baseCmd + + val float64 +} + +func NewFloatCmd(args ...string) *FloatCmd { + return &FloatCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != 
nil { + cmd.err = err + return err + } + cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + *baseCmd + + val []string +} + +func NewStringSliceCmd(args ...string) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseStringSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]string) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + *baseCmd + + val []bool +} + +func NewBoolSliceCmd(args ...string) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseBoolSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]bool) + return nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + *baseCmd + + val map[string]string +} + +func NewStringStringMapCmd(args ...string) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err 
+} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseStringStringMap) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(map[string]string) + return nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + *baseCmd + + val []Z +} + +func NewZSliceCmd(args ...string) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseZSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]Z) + return nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + *baseCmd + + cursor int64 + keys []string +} + +func NewScanCmd(args ...string) *ScanCmd { + return &ScanCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *ScanCmd) Val() (int64, []string) { + return cmd.cursor, cmd.keys +} + +func (cmd *ScanCmd) Result() (int64, []string, error) { + return cmd.cursor, cmd.keys, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.keys) +} + +func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error { + vi, err := parseReply(rd, parseSlice) + if err != nil { + cmd.err = err + return cmd.err + } + v := vi.([]interface{}) + + cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64) + if cmd.err != nil { + return cmd.err + } + + keys := v[1].([]interface{}) + for _, keyi := range keys { + cmd.keys = append(cmd.keys, keyi.(string)) + } + + return nil +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go 
b/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go new file mode 100644 index 000000000..6068bab17 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go @@ -0,0 +1,1246 @@ +package redis + +import ( + "io" + "strconv" + "time" +) + +func formatFloat(f float64) string { + return strconv.FormatFloat(f, 'f', -1, 64) +} + +func readTimeout(sec int64) time.Duration { + if sec == 0 { + return 0 + } + return time.Duration(sec+1) * time.Second +} + +//------------------------------------------------------------------------------ + +func (c *Client) Auth(password string) *StatusCmd { + cmd := NewStatusCmd("AUTH", password) + c.Process(cmd) + return cmd +} + +func (c *Client) Echo(message string) *StringCmd { + cmd := NewStringCmd("ECHO", message) + c.Process(cmd) + return cmd +} + +func (c *Client) Ping() *StatusCmd { + cmd := NewStatusCmd("PING") + c.Process(cmd) + return cmd +} + +func (c *Client) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *Client) Select(index int64) *StatusCmd { + cmd := NewStatusCmd("SELECT", strconv.FormatInt(index, 10)) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) Del(keys ...string) *IntCmd { + args := append([]string{"DEL"}, keys...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) Dump(key string) *StringCmd { + cmd := NewStringCmd("DUMP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Exists(key string) *BoolCmd { + cmd := NewBoolCmd("EXISTS", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Expire(key string, dur time.Duration) *BoolCmd { + cmd := NewBoolCmd("EXPIRE", key, strconv.FormatInt(int64(dur/time.Second), 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("EXPIREAT", key, strconv.FormatInt(tm.Unix(), 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("KEYS", pattern) + c.Process(cmd) + return cmd +} + +func (c *Client) Migrate(host, port, key string, db, timeout int64) *StatusCmd { + cmd := NewStatusCmd( + "MIGRATE", + host, + port, + key, + strconv.FormatInt(db, 10), + strconv.FormatInt(timeout, 10), + ) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("MOVE", key, strconv.FormatInt(db, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) ObjectRefCount(keys ...string) *IntCmd { + args := append([]string{"OBJECT", "REFCOUNT"}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ObjectEncoding(keys ...string) *StringCmd { + args := append([]string{"OBJECT", "ENCODING"}, keys...) + cmd := NewStringCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ObjectIdleTime(keys ...string) *DurationCmd { + args := append([]string{"OBJECT", "IDLETIME"}, keys...) + cmd := NewDurationCmd(time.Second, args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("PERSIST", key) + c.Process(cmd) + return cmd +} + +func (c *Client) PExpire(key string, dur time.Duration) *BoolCmd { + cmd := NewBoolCmd("PEXPIRE", key, strconv.FormatInt(int64(dur/time.Millisecond), 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) PExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + "PEXPIREAT", + key, + strconv.FormatInt(tm.UnixNano()/int64(time.Millisecond), 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "PTTL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) RandomKey() *StringCmd { + cmd := NewStringCmd("RANDOMKEY") + c.Process(cmd) + return cmd +} + +func (c *Client) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("RENAME", key, newkey) + c.Process(cmd) + return cmd +} + +func (c *Client) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("RENAMENX", key, newkey) + c.Process(cmd) + return cmd +} + +func (c *Client) Restore(key string, ttl int64, value string) *StatusCmd { + cmd := NewStatusCmd( + "RESTORE", + key, + strconv.FormatInt(ttl, 10), + value, + ) + c.Process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count float64 + Get []string + Order string + IsAlpha bool + Store string +} + +func (c *Client) Sort(key string, sort Sort) *StringSliceCmd { + args := []string{"SORT", key} + if sort.By != "" { + args = append(args, "BY", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "LIMIT", formatFloat(sort.Offset), formatFloat(sort.Count)) + } + for _, get := range sort.Get { + args = append(args, "GET", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.IsAlpha { + args = append(args, "ALPHA") + } + if sort.Store != "" { + args = append(args, "STORE", sort.Store) + } + cmd := NewStringSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "TTL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Type(key string) *StatusCmd { + cmd := NewStatusCmd("TYPE", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Scan(cursor int64, match string, count int64) *ScanCmd { + args := []string{"SCAN", strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []string{"SSCAN", key, strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) HScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []string{"HSCAN", key, strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []string{"ZSCAN", key, strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) 
+ c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) Append(key, value string) *IntCmd { + cmd := NewIntCmd("APPEND", key, value) + c.Process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *Client) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []string{"BITCOUNT", key} + if bitCount != nil { + args = append( + args, + strconv.FormatInt(bitCount.Start, 10), + strconv.FormatInt(bitCount.End, 10), + ) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) bitOp(op, destKey string, keys ...string) *IntCmd { + args := []string{"BITOP", op, destKey} + args = append(args, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("AND", destKey, keys...) +} + +func (c *Client) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("OR", destKey, keys...) +} + +func (c *Client) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("XOR", destKey, keys...) 
+} + +func (c *Client) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("NOT", destKey, key) +} + +func (c *Client) Decr(key string) *IntCmd { + cmd := NewIntCmd("DECR", key) + c.Process(cmd) + return cmd +} + +func (c *Client) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("DECRBY", key, strconv.FormatInt(decrement, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) Get(key string) *StringCmd { + cmd := NewStringCmd("GET", key) + c.Process(cmd) + return cmd +} + +func (c *Client) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("GETBIT", key, strconv.FormatInt(offset, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd( + "GETRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(end, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) GetSet(key, value string) *StringCmd { + cmd := NewStringCmd("GETSET", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) Incr(key string) *IntCmd { + cmd := NewIntCmd("INCR", key) + c.Process(cmd) + return cmd +} + +func (c *Client) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("INCRBY", key, strconv.FormatInt(value, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("INCRBYFLOAT", key, formatFloat(value)) + c.Process(cmd) + return cmd +} + +func (c *Client) MGet(keys ...string) *SliceCmd { + args := append([]string{"MGET"}, keys...) + cmd := NewSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) MSet(pairs ...string) *StatusCmd { + args := append([]string{"MSET"}, pairs...) + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) MSetNX(pairs ...string) *BoolCmd { + args := append([]string{"MSETNX"}, pairs...) + cmd := NewBoolCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) PSetEx(key string, dur time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "PSETEX", + key, + strconv.FormatInt(int64(dur/time.Millisecond), 10), + value, + ) + c.Process(cmd) + return cmd +} + +func (c *Client) Set(key, value string) *StatusCmd { + cmd := NewStatusCmd("SET", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "SETBIT", + key, + strconv.FormatInt(offset, 10), + strconv.FormatInt(int64(value), 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) SetEx(key string, dur time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd("SETEX", key, strconv.FormatInt(int64(dur/time.Second), 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) SetNX(key, value string) *BoolCmd { + cmd := NewBoolCmd("SETNX", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("SETRANGE", key, strconv.FormatInt(offset, 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) StrLen(key string) *IntCmd { + cmd := NewIntCmd("STRLEN", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) HDel(key string, fields ...string) *IntCmd { + args := append([]string{"HDEL", key}, fields...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("HEXISTS", key, field) + c.Process(cmd) + return cmd +} + +func (c *Client) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("HGET", key, field) + c.Process(cmd) + return cmd +} + +func (c *Client) HGetAll(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HGETALL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HGetAllMap(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("HGETALL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("HINCRBY", key, field, strconv.FormatInt(incr, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("HINCRBYFLOAT", key, field, formatFloat(incr)) + c.Process(cmd) + return cmd +} + +func (c *Client) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HKEYS", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HLen(key string) *IntCmd { + cmd := NewIntCmd("HLEN", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HMGet(key string, fields ...string) *SliceCmd { + args := append([]string{"HMGET", key}, fields...) + cmd := NewSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) HMSet(key, field, value string, pairs ...string) *StatusCmd { + args := append([]string{"HMSET", key, field, value}, pairs...) + cmd := NewStatusCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) HSet(key, field, value string) *BoolCmd { + cmd := NewBoolCmd("HSET", key, field, value) + c.Process(cmd) + return cmd +} + +func (c *Client) HSetNX(key, field, value string) *BoolCmd { + cmd := NewBoolCmd("HSETNX", key, field, value) + c.Process(cmd) + return cmd +} + +func (c *Client) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HVALS", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) BLPop(timeout int64, keys ...string) *StringSliceCmd { + args := append([]string{"BLPOP"}, keys...) + args = append(args, strconv.FormatInt(timeout, 10)) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) BRPop(timeout int64, keys ...string) *StringSliceCmd { + args := append([]string{"BRPOP"}, keys...) + args = append(args, strconv.FormatInt(timeout, 10)) + cmd := NewStringSliceCmd(args...) 
+ cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) BRPopLPush(source, destination string, timeout int64) *StringCmd { + cmd := NewStringCmd( + "BRPOPLPUSH", + source, + destination, + strconv.FormatInt(timeout, 10), + ) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("LINDEX", key, strconv.FormatInt(index, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) LInsert(key, op, pivot, value string) *IntCmd { + cmd := NewIntCmd("LINSERT", key, op, pivot, value) + c.Process(cmd) + return cmd +} + +func (c *Client) LLen(key string) *IntCmd { + cmd := NewIntCmd("LLEN", key) + c.Process(cmd) + return cmd +} + +func (c *Client) LPop(key string) *StringCmd { + cmd := NewStringCmd("LPOP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) LPush(key string, values ...string) *IntCmd { + args := append([]string{"LPUSH", key}, values...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) LPushX(key, value string) *IntCmd { + cmd := NewIntCmd("LPUSHX", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "LRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) LRem(key string, count int64, value string) *IntCmd { + cmd := NewIntCmd("LREM", key, strconv.FormatInt(count, 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) LSet(key string, index int64, value string) *StatusCmd { + cmd := NewStatusCmd("LSET", key, strconv.FormatInt(index, 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "LTRIM", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) RPop(key string) *StringCmd { + cmd := NewStringCmd("RPOP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("RPOPLPUSH", source, destination) + c.Process(cmd) + return cmd +} + +func (c *Client) RPush(key string, values ...string) *IntCmd { + args := append([]string{"RPUSH", key}, values...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) RPushX(key string, value string) *IntCmd { + cmd := NewIntCmd("RPUSHX", key, value) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) SAdd(key string, members ...string) *IntCmd { + args := append([]string{"SADD", key}, members...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) SCard(key string) *IntCmd { + cmd := NewIntCmd("SCARD", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SDiff(keys ...string) *StringSliceCmd { + args := append([]string{"SDIFF"}, keys...) + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SDiffStore(destination string, keys ...string) *IntCmd { + args := append([]string{"SDIFFSTORE", destination}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SInter(keys ...string) *StringSliceCmd { + args := append([]string{"SINTER"}, keys...) + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SInterStore(destination string, keys ...string) *IntCmd { + args := append([]string{"SINTERSTORE", destination}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SIsMember(key, member string) *BoolCmd { + cmd := NewBoolCmd("SISMEMBER", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("SMEMBERS", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SMove(source, destination, member string) *BoolCmd { + cmd := NewBoolCmd("SMOVE", source, destination, member) + c.Process(cmd) + return cmd +} + +func (c *Client) SPop(key string) *StringCmd { + cmd := NewStringCmd("SPOP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("SRANDMEMBER", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SRem(key string, members ...string) *IntCmd { + args := append([]string{"SREM", key}, members...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SUnion(keys ...string) *StringSliceCmd { + args := append([]string{"SUNION"}, keys...) + cmd := NewStringSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) SUnionStore(destination string, keys ...string) *IntCmd { + args := append([]string{"SUNIONSTORE", destination}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type Z struct { + Score float64 + Member string +} + +type ZStore struct { + Weights []int64 + Aggregate string +} + +func (c *Client) ZAdd(key string, members ...Z) *IntCmd { + args := []string{"ZADD", key} + for _, m := range members { + args = append(args, formatFloat(m.Score), m.Member) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZCard(key string) *IntCmd { + cmd := NewIntCmd("ZCARD", key) + c.Process(cmd) + return cmd +} + +func (c *Client) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("ZCOUNT", key, min, max) + c.Process(cmd) + return cmd +} + +func (c *Client) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("ZINCRBY", key, formatFloat(increment), member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZInterStore( + destination string, + store ZStore, + keys ...string, +) *IntCmd { + args := []string{"ZINTERSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)} + args = append(args, keys...) + if len(store.Weights) > 0 { + args = append(args, "WEIGHTS") + for _, weight := range store.Weights { + args = append(args, strconv.FormatInt(weight, 10)) + } + } + if store.Aggregate != "" { + args = append(args, "AGGREGATE", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []string{ + "ZRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + } + if withScores { + args = append(args, "WITHSCORES") + } + cmd := NewStringSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *Client) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + args := []string{ + "ZRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + "WITHSCORES", + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +type ZRangeByScore struct { + Min, Max string + + Offset, Count int64 +} + +func (c *Client) zRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd { + args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max} + if withScores { + args = append(args, "WITHSCORES") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd { + return c.zRangeByScore(key, opt, false) +} + +func (c *Client) ZRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd { + args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max, "WITHSCORES"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("ZRANK", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRem(key string, members ...string) *IntCmd { + args := append([]string{"ZREM", key}, members...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "ZREMRANGEBYRANK", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("ZREMRANGEBYSCORE", key, min, max) + c.Process(cmd) + return cmd +} + +func (c *Client) zRevRange(key, start, stop string, withScores bool) *StringSliceCmd { + args := []string{"ZREVRANGE", key, start, stop} + if withScores { + args = append(args, "WITHSCORES") + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRevRange(key, start, stop string) *StringSliceCmd { + return c.zRevRange(key, start, stop, false) +} + +func (c *Client) ZRevRangeWithScores(key, start, stop string) *ZSliceCmd { + args := []string{"ZREVRANGE", key, start, stop, "WITHSCORES"} + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) zRevRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd { + args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min} + if withScores { + args = append(args, "WITHSCORES") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRevRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd { + return c.zRevRangeByScore(key, opt, false) +} + +func (c *Client) ZRevRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd { + args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min, "WITHSCORES"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewZSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("ZREVRANK", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("ZSCORE", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZUnionStore( + destination string, + store ZStore, + keys ...string, +) *IntCmd { + args := []string{"ZUNIONSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)} + args = append(args, keys...) + if len(store.Weights) > 0 { + args = append(args, "WEIGHTS") + for _, weight := range store.Weights { + args = append(args, strconv.FormatInt(weight, 10)) + } + } + if store.Aggregate != "" { + args = append(args, "AGGREGATE", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) BgRewriteAOF() *StatusCmd { + cmd := NewStatusCmd("BGREWRITEAOF") + c.Process(cmd) + return cmd +} + +func (c *Client) BgSave() *StatusCmd { + cmd := NewStatusCmd("BGSAVE") + c.Process(cmd) + return cmd +} + +func (c *Client) ClientKill(ipPort string) *StatusCmd { + cmd := NewStatusCmd("CLIENT", "KILL", ipPort) + c.Process(cmd) + return cmd +} + +func (c *Client) ClientList() *StringCmd { + cmd := NewStringCmd("CLIENT", "LIST") + c.Process(cmd) + return cmd +} + +func (c *Client) ConfigGet(parameter string) *SliceCmd { + cmd := NewSliceCmd("CONFIG", "GET", parameter) + c.Process(cmd) + return cmd +} + +func (c *Client) ConfigResetStat() *StatusCmd { + cmd := NewStatusCmd("CONFIG", "RESETSTAT") + c.Process(cmd) + return cmd +} + +func (c *Client) ConfigSet(parameter, value string) *StatusCmd { + cmd := NewStatusCmd("CONFIG", "SET", parameter, value) + c.Process(cmd) + return cmd +} + +func (c *Client) DbSize() *IntCmd { + cmd := NewIntCmd("DBSIZE") + c.Process(cmd) + return cmd +} + +func (c *Client) FlushAll() *StatusCmd { + cmd := 
NewStatusCmd("FLUSHALL") + c.Process(cmd) + return cmd +} + +func (c *Client) FlushDb() *StatusCmd { + cmd := NewStatusCmd("FLUSHDB") + c.Process(cmd) + return cmd +} + +func (c *Client) Info() *StringCmd { + cmd := NewStringCmd("INFO") + c.Process(cmd) + return cmd +} + +func (c *Client) LastSave() *IntCmd { + cmd := NewIntCmd("LASTSAVE") + c.Process(cmd) + return cmd +} + +func (c *Client) Save() *StatusCmd { + cmd := NewStatusCmd("SAVE") + c.Process(cmd) + return cmd +} + +func (c *Client) shutdown(modifier string) *StatusCmd { + var args []string + if modifier == "" { + args = []string{"SHUTDOWN"} + } else { + args = []string{"SHUTDOWN", modifier} + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + if err := cmd.Err(); err != nil { + if err == io.EOF { + // Server quit as expected. + cmd.err = nil + } + } else { + // Server did not quit. String reply contains the reason. + cmd.err = errorf(cmd.val) + cmd.val = "" + } + return cmd +} + +func (c *Client) Shutdown() *StatusCmd { + return c.shutdown("") +} + +func (c *Client) ShutdownSave() *StatusCmd { + return c.shutdown("SAVE") +} + +func (c *Client) ShutdownNoSave() *StatusCmd { + return c.shutdown("NOSAVE") +} + +func (c *Client) SlaveOf(host, port string) *StatusCmd { + cmd := NewStatusCmd("SLAVEOF", host, port) + c.Process(cmd) + return cmd +} + +func (c *Client) SlowLog() { + panic("not implemented") +} + +func (c *Client) Sync() { + panic("not implemented") +} + +func (c *Client) Time() *StringSliceCmd { + cmd := NewStringSliceCmd("TIME") + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) Eval(script string, keys []string, args []string) *Cmd { + cmdArgs := []string{"EVAL", script, strconv.FormatInt(int64(len(keys)), 10)} + cmdArgs = append(cmdArgs, keys...) + cmdArgs = append(cmdArgs, args...) + cmd := NewCmd(cmdArgs...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) EvalSha(sha1 string, keys []string, args []string) *Cmd { + cmdArgs := []string{"EVALSHA", sha1, strconv.FormatInt(int64(len(keys)), 10)} + cmdArgs = append(cmdArgs, keys...) + cmdArgs = append(cmdArgs, args...) + cmd := NewCmd(cmdArgs...) + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptExists(scripts ...string) *BoolSliceCmd { + args := append([]string{"SCRIPT", "EXISTS"}, scripts...) + cmd := NewBoolSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptFlush() *StatusCmd { + cmd := NewStatusCmd("SCRIPT", "FLUSH") + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptKill() *StatusCmd { + cmd := NewStatusCmd("SCRIPT", "KILL") + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptLoad(script string) *StringCmd { + cmd := NewStringCmd("SCRIPT", "LOAD", script) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) DebugObject(key string) *StringCmd { + cmd := NewStringCmd("DEBUG", "OBJECT", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) PubSubChannels(pattern string) *StringSliceCmd { + args := []string{"PUBSUB", "CHANNELS"} + if pattern != "*" { + args = append(args, pattern) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) PubSubNumSub(channels ...string) *SliceCmd { + args := []string{"PUBSUB", "NUMSUB"} + args = append(args, channels...) + cmd := NewSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) PubSubNumPat() *IntCmd { + cmd := NewIntCmd("PUBSUB", "NUMPAT") + c.Process(cmd) + return cmd +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go b/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go new file mode 100644 index 000000000..55262533a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go @@ -0,0 +1,4 @@ +/* +Package redis implements a Redis client. +*/ +package redis diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/error.go b/Godeps/_workspace/src/gopkg.in/redis.v2/error.go new file mode 100644 index 000000000..667fffdc6 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/error.go @@ -0,0 +1,23 @@ +package redis + +import ( + "fmt" +) + +// Redis nil reply. +var Nil = errorf("redis: nil") + +// Redis transaction failed. +var TxFailedErr = errorf("redis: transaction failed") + +type redisError struct { + s string +} + +func errorf(s string, args ...interface{}) redisError { + return redisError{s: fmt.Sprintf(s, args...)} +} + +func (err redisError) Error() string { + return err.s +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go new file mode 100644 index 000000000..dbc951310 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go @@ -0,0 +1,180 @@ +package redis_test + +import ( + "fmt" + "strconv" + + "gopkg.in/redis.v2" +) + +var client *redis.Client + +func init() { + client = redis.NewTCPClient(&redis.Options{ + Addr: ":6379", + }) + client.FlushDb() +} + +func ExampleNewTCPClient() { + client := redis.NewTCPClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + + pong, err := client.Ping().Result() + fmt.Println(pong, err) + // Output: PONG +} + +func ExampleNewFailoverClient() { + client := redis.NewFailoverClient(&redis.FailoverOptions{ + MasterName: "master", + SentinelAddrs: []string{":26379"}, + }) + 
+ pong, err := client.Ping().Result() + fmt.Println(pong, err) + // Output: PONG +} + +func ExampleClient() { + if err := client.Set("foo", "bar").Err(); err != nil { + panic(err) + } + + v, err := client.Get("hello").Result() + fmt.Printf("%q %q %v", v, err, err == redis.Nil) + // Output: "" "redis: nil" true +} + +func ExampleClient_Incr() { + if err := client.Incr("counter").Err(); err != nil { + panic(err) + } + + n, err := client.Get("counter").Int64() + fmt.Println(n, err) + // Output: 1 +} + +func ExampleClient_Pipelined() { + cmds, err := client.Pipelined(func(c *redis.Pipeline) error { + c.Set("key1", "hello1") + c.Get("key1") + return nil + }) + fmt.Println(err) + set := cmds[0].(*redis.StatusCmd) + fmt.Println(set) + get := cmds[1].(*redis.StringCmd) + fmt.Println(get) + // Output: + // SET key1 hello1: OK + // GET key1: hello1 +} + +func ExamplePipeline() { + pipeline := client.Pipeline() + set := pipeline.Set("key1", "hello1") + get := pipeline.Get("key1") + cmds, err := pipeline.Exec() + fmt.Println(cmds, err) + fmt.Println(set) + fmt.Println(get) + // Output: [SET key1 hello1: OK GET key1: hello1] + // SET key1 hello1: OK + // GET key1: hello1 +} + +func ExampleMulti() { + incr := func(tx *redis.Multi) ([]redis.Cmder, error) { + s, err := tx.Get("key").Result() + if err != nil && err != redis.Nil { + return nil, err + } + n, _ := strconv.ParseInt(s, 10, 64) + + return tx.Exec(func() error { + tx.Set("key", strconv.FormatInt(n+1, 10)) + return nil + }) + } + + client.Del("key") + + tx := client.Multi() + defer tx.Close() + + watch := tx.Watch("key") + _ = watch.Err() + + for { + cmds, err := incr(tx) + if err == redis.TxFailedErr { + continue + } else if err != nil { + panic(err) + } + fmt.Println(cmds, err) + break + } + + // Output: [SET key 1: OK] +} + +func ExamplePubSub() { + pubsub := client.PubSub() + defer pubsub.Close() + + err := pubsub.Subscribe("mychannel") + _ = err + + msg, err := pubsub.Receive() + fmt.Println(msg, err) + + pub := 
client.Publish("mychannel", "hello") + _ = pub.Err() + + msg, err = pubsub.Receive() + fmt.Println(msg, err) + + // Output: subscribe: mychannel + // Message +} + +func ExampleScript() { + setnx := redis.NewScript(` + if redis.call("get", KEYS[1]) == false then + redis.call("set", KEYS[1], ARGV[1]) + return 1 + end + return 0 + `) + + v1, err := setnx.Run(client, []string{"keynx"}, []string{"foo"}).Result() + fmt.Println(v1.(int64), err) + + v2, err := setnx.Run(client, []string{"keynx"}, []string{"bar"}).Result() + fmt.Println(v2.(int64), err) + + get := client.Get("keynx") + fmt.Println(get) + + // Output: 1 + // 0 + // GET keynx: foo +} + +func Example_customCommand() { + Get := func(client *redis.Client, key string) *redis.StringCmd { + cmd := redis.NewStringCmd("GET", key) + client.Process(cmd) + return cmd + } + + v, err := Get(client, "key_does_not_exist").Result() + fmt.Printf("%q %s", v, err) + // Output: "" redis: nil +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go new file mode 100644 index 000000000..7f7fa6797 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go @@ -0,0 +1,5 @@ +package redis + +func (c *baseClient) Pool() pool { + return c.connPool +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go b/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go new file mode 100644 index 000000000..bff38dfaa --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go @@ -0,0 +1,138 @@ +package redis + +import ( + "errors" + "fmt" +) + +var errDiscard = errors.New("redis: Discard can be used only inside Exec") + +// Not thread-safe. 
+type Multi struct { + *Client +} + +func (c *Client) Multi() *Multi { + return &Multi{ + Client: &Client{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, true), + }, + }, + } +} + +func (c *Multi) Close() error { + if err := c.Unwatch().Err(); err != nil { + return err + } + return c.Client.Close() +} + +func (c *Multi) Watch(keys ...string) *StatusCmd { + args := append([]string{"WATCH"}, keys...) + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Multi) Unwatch(keys ...string) *StatusCmd { + args := append([]string{"UNWATCH"}, keys...) + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Multi) Discard() error { + if c.cmds == nil { + return errDiscard + } + c.cmds = c.cmds[:1] + return nil +} + +// Exec always returns list of commands. If transaction fails +// TxFailedErr is returned. Otherwise Exec returns error of the first +// failed command or nil. +func (c *Multi) Exec(f func() error) ([]Cmder, error) { + c.cmds = []Cmder{NewStatusCmd("MULTI")} + if err := f(); err != nil { + return nil, err + } + c.cmds = append(c.cmds, NewSliceCmd("EXEC")) + + cmds := c.cmds + c.cmds = nil + + if len(cmds) == 2 { + return []Cmder{}, nil + } + + cn, err := c.conn() + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return cmds[1 : len(cmds)-1], err + } + + err = c.execCmds(cn, cmds) + if err != nil { + c.freeConn(cn, err) + return cmds[1 : len(cmds)-1], err + } + + c.putConn(cn) + return cmds[1 : len(cmds)-1], nil +} + +func (c *Multi) execCmds(cn *conn, cmds []Cmder) error { + err := c.writeCmd(cn, cmds...) + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + + statusCmd := NewStatusCmd() + + // Omit last command (EXEC). + cmdsLen := len(cmds) - 1 + + // Parse queued replies. + for i := 0; i < cmdsLen; i++ { + if err := statusCmd.parseReply(cn.rd); err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + } + + // Parse number of replies. 
+ line, err := readLine(cn.rd) + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + if line[0] != '*' { + err := fmt.Errorf("redis: expected '*', but got line %q", line) + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr) + return TxFailedErr + } + + var firstCmdErr error + + // Parse replies. + // Loop starts from 1 to omit MULTI cmd. + for i := 1; i < cmdsLen; i++ { + cmd := cmds[i] + if err := cmd.parseReply(cn.rd); err != nil { + if firstCmdErr == nil { + firstCmdErr = err + } + } + } + + return firstCmdErr +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go b/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go new file mode 100644 index 000000000..b4c380c76 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go @@ -0,0 +1,262 @@ +package redis + +import ( + "errors" + "fmt" + "strconv" + + "gopkg.in/bufio.v1" +) + +type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error) + +var ( + errReaderTooSmall = errors.New("redis: reader is too small") +) + +//------------------------------------------------------------------------------ + +func appendArgs(buf []byte, args []string) []byte { + buf = append(buf, '*') + buf = strconv.AppendUint(buf, uint64(len(args)), 10) + buf = append(buf, '\r', '\n') + for _, arg := range args { + buf = append(buf, '$') + buf = strconv.AppendUint(buf, uint64(len(arg)), 10) + buf = append(buf, '\r', '\n') + buf = append(buf, arg...) 
+ buf = append(buf, '\r', '\n') + } + return buf +} + +//------------------------------------------------------------------------------ + +func readLine(rd *bufio.Reader) ([]byte, error) { + line, isPrefix, err := rd.ReadLine() + if err != nil { + return line, err + } + if isPrefix { + return line, errReaderTooSmall + } + return line, nil +} + +func readN(rd *bufio.Reader, n int) ([]byte, error) { + b, err := rd.ReadN(n) + if err == bufio.ErrBufferFull { + tmp := make([]byte, n) + r := copy(tmp, b) + b = tmp + + for { + nn, err := rd.Read(b[r:]) + r += nn + if r >= n { + // Ignore error if we read enough. + break + } + if err != nil { + return nil, err + } + } + } else if err != nil { + return nil, err + } + return b, nil +} + +//------------------------------------------------------------------------------ + +func parseReq(rd *bufio.Reader) ([]string, error) { + line, err := readLine(rd) + if err != nil { + return nil, err + } + + if line[0] != '*' { + return []string{string(line)}, nil + } + numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + + args := make([]string, 0, numReplies) + for i := int64(0); i < numReplies; i++ { + line, err = readLine(rd) + if err != nil { + return nil, err + } + if line[0] != '$' { + return nil, fmt.Errorf("redis: expected '$', but got %q", line) + } + + argLen, err := strconv.ParseInt(string(line[1:]), 10, 32) + if err != nil { + return nil, err + } + + arg, err := readN(rd, int(argLen)+2) + if err != nil { + return nil, err + } + args = append(args, string(arg[:argLen])) + } + return args, nil +} + +//------------------------------------------------------------------------------ + +func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) { + line, err := readLine(rd) + if err != nil { + return nil, err + } + + switch line[0] { + case '-': + return nil, errorf(string(line[1:])) + case '+': + return string(line[1:]), nil + case ':': + v, err := 
strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + return v, nil + case '$': + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + return nil, Nil + } + + replyLen, err := strconv.Atoi(string(line[1:])) + if err != nil { + return nil, err + } + + b, err := readN(rd, replyLen+2) + if err != nil { + return nil, err + } + return string(b[:replyLen]), nil + case '*': + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + return nil, Nil + } + + repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + + return p(rd, repliesNum) + } + return nil, fmt.Errorf("redis: can't parse %q", line) +} + +func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, 0, n) + for i := int64(0); i < n; i++ { + v, err := parseReply(rd, parseSlice) + if err == Nil { + vals = append(vals, nil) + } else if err != nil { + return nil, err + } else { + vals = append(vals, v) + } + } + return vals, nil +} + +func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]string, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + v, ok := viface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", viface) + } + vals = append(vals, v) + } + return vals, nil +} + +func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]bool, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + v, ok := viface.(int64) + if !ok { + return nil, fmt.Errorf("got %T, expected int64", viface) + } + vals = append(vals, v == 1) + } + return vals, nil +} + +func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) { + m := make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + keyiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + key, ok := 
keyiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", keyiface) + } + + valueiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + value, ok := valueiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", valueiface) + } + + m[key] = value + } + return m, nil +} + +func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) { + zz := make([]Z, n/2) + for i := int64(0); i < n; i += 2 { + z := &zz[i/2] + + memberiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + member, ok := memberiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", memberiface) + } + z.Member = member + + scoreiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + scorestr, ok := scoreiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", scoreiface) + } + score, err := strconv.ParseFloat(scorestr, 64) + if err != nil { + return nil, err + } + z.Score = score + } + return zz, nil +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go new file mode 100644 index 000000000..1b9e15810 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go @@ -0,0 +1,54 @@ +package redis + +import ( + "testing" + + "gopkg.in/bufio.v1" +) + +func BenchmarkParseReplyStatus(b *testing.B) { + benchmarkParseReply(b, "+OK\r\n", nil, false) +} + +func BenchmarkParseReplyInt(b *testing.B) { + benchmarkParseReply(b, ":1\r\n", nil, false) +} + +func BenchmarkParseReplyError(b *testing.B) { + benchmarkParseReply(b, "-Error message\r\n", nil, true) +} + +func BenchmarkParseReplyString(b *testing.B) { + benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false) +} + +func BenchmarkParseReplySlice(b *testing.B) { + benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", parseSlice, false) +} + +func benchmarkParseReply(b *testing.B, reply string, p multiBulkParser, wanterr 
bool) { + b.StopTimer() + + buf := &bufio.Buffer{} + rd := bufio.NewReader(buf) + for i := 0; i < b.N; i++ { + buf.WriteString(reply) + } + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := parseReply(rd, p) + if !wanterr && err != nil { + panic(err) + } + } +} + +func BenchmarkAppendArgs(b *testing.B) { + buf := make([]byte, 0, 64) + args := []string{"hello", "world", "foo", "bar"} + for i := 0; i < b.N; i++ { + appendArgs(buf, args) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go new file mode 100644 index 000000000..540d6c51d --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go @@ -0,0 +1,91 @@ +package redis + +// Not thread-safe. +type Pipeline struct { + *Client + + closed bool +} + +func (c *Client) Pipeline() *Pipeline { + return &Pipeline{ + Client: &Client{ + baseClient: &baseClient{ + opt: c.opt, + connPool: c.connPool, + + cmds: make([]Cmder, 0), + }, + }, + } +} + +func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) { + pc := c.Pipeline() + if err := f(pc); err != nil { + return nil, err + } + cmds, err := pc.Exec() + pc.Close() + return cmds, err +} + +func (c *Pipeline) Close() error { + c.closed = true + return nil +} + +func (c *Pipeline) Discard() error { + if c.closed { + return errClosed + } + c.cmds = c.cmds[:0] + return nil +} + +// Exec always returns list of commands and error of the first failed +// command if any. 
+func (c *Pipeline) Exec() ([]Cmder, error) { + if c.closed { + return nil, errClosed + } + + cmds := c.cmds + c.cmds = make([]Cmder, 0) + + if len(cmds) == 0 { + return []Cmder{}, nil + } + + cn, err := c.conn() + if err != nil { + setCmdsErr(cmds, err) + return cmds, err + } + + if err := c.execCmds(cn, cmds); err != nil { + c.freeConn(cn, err) + return cmds, err + } + + c.putConn(cn) + return cmds, nil +} + +func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error { + if err := c.writeCmd(cn, cmds...); err != nil { + setCmdsErr(cmds, err) + return err + } + + var firstCmdErr error + for _, cmd := range cmds { + if err := cmd.parseReply(cn.rd); err != nil { + if firstCmdErr == nil { + firstCmdErr = err + } + } + } + + return firstCmdErr +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go new file mode 100644 index 000000000..bca4d1963 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go @@ -0,0 +1,405 @@ +package redis + +import ( + "container/list" + "errors" + "log" + "net" + "sync" + "time" + + "gopkg.in/bufio.v1" +) + +var ( + errClosed = errors.New("redis: client is closed") + errRateLimited = errors.New("redis: you open connections too fast") +) + +var ( + zeroTime = time.Time{} +) + +type pool interface { + Get() (*conn, bool, error) + Put(*conn) error + Remove(*conn) error + Len() int + Size() int + Close() error + Filter(func(*conn) bool) +} + +//------------------------------------------------------------------------------ + +type conn struct { + netcn net.Conn + rd *bufio.Reader + buf []byte + + inUse bool + usedAt time.Time + + readTimeout time.Duration + writeTimeout time.Duration + + elem *list.Element +} + +func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) { + return func() (*conn, error) { + netcn, err := dial() + if err != nil { + return nil, err + } + cn := &conn{ + netcn: netcn, + buf: make([]byte, 0, 64), + } + cn.rd = bufio.NewReader(cn) + return 
cn, nil + } +} + +func (cn *conn) Read(b []byte) (int, error) { + if cn.readTimeout != 0 { + cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout)) + } else { + cn.netcn.SetReadDeadline(zeroTime) + } + return cn.netcn.Read(b) +} + +func (cn *conn) Write(b []byte) (int, error) { + if cn.writeTimeout != 0 { + cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout)) + } else { + cn.netcn.SetWriteDeadline(zeroTime) + } + return cn.netcn.Write(b) +} + +func (cn *conn) RemoteAddr() net.Addr { + return cn.netcn.RemoteAddr() +} + +func (cn *conn) Close() error { + return cn.netcn.Close() +} + +//------------------------------------------------------------------------------ + +type connPool struct { + dial func() (*conn, error) + rl *rateLimiter + + opt *options + + cond *sync.Cond + conns *list.List + + idleNum int + closed bool +} + +func newConnPool(dial func() (*conn, error), opt *options) *connPool { + return &connPool{ + dial: dial, + rl: newRateLimiter(time.Second, 2*opt.PoolSize), + + opt: opt, + + cond: sync.NewCond(&sync.Mutex{}), + conns: list.New(), + } +} + +func (p *connPool) new() (*conn, error) { + if !p.rl.Check() { + return nil, errRateLimited + } + return p.dial() +} + +func (p *connPool) Get() (*conn, bool, error) { + p.cond.L.Lock() + + if p.closed { + p.cond.L.Unlock() + return nil, false, errClosed + } + + if p.opt.IdleTimeout > 0 { + for el := p.conns.Front(); el != nil; el = el.Next() { + cn := el.Value.(*conn) + if cn.inUse { + break + } + if time.Since(cn.usedAt) > p.opt.IdleTimeout { + if err := p.remove(cn); err != nil { + log.Printf("remove failed: %s", err) + } + } + } + } + + for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 { + p.cond.Wait() + } + + if p.idleNum > 0 { + elem := p.conns.Front() + cn := elem.Value.(*conn) + if cn.inUse { + panic("pool: precondition failed") + } + cn.inUse = true + p.conns.MoveToBack(elem) + p.idleNum-- + + p.cond.L.Unlock() + return cn, false, nil + } + + if p.conns.Len() < p.opt.PoolSize { + cn, err 
:= p.new() + if err != nil { + p.cond.L.Unlock() + return nil, false, err + } + + cn.inUse = true + cn.elem = p.conns.PushBack(cn) + + p.cond.L.Unlock() + return cn, true, nil + } + + panic("not reached") +} + +func (p *connPool) Put(cn *conn) error { + if cn.rd.Buffered() != 0 { + b, _ := cn.rd.ReadN(cn.rd.Buffered()) + log.Printf("redis: connection has unread data: %q", b) + return p.Remove(cn) + } + + if p.opt.IdleTimeout > 0 { + cn.usedAt = time.Now() + } + + p.cond.L.Lock() + if p.closed { + p.cond.L.Unlock() + return errClosed + } + cn.inUse = false + p.conns.MoveToFront(cn.elem) + p.idleNum++ + p.cond.Signal() + p.cond.L.Unlock() + + return nil +} + +func (p *connPool) Remove(cn *conn) error { + p.cond.L.Lock() + if p.closed { + // Noop, connection is already closed. + p.cond.L.Unlock() + return nil + } + err := p.remove(cn) + p.cond.Signal() + p.cond.L.Unlock() + return err +} + +func (p *connPool) remove(cn *conn) error { + p.conns.Remove(cn.elem) + cn.elem = nil + if !cn.inUse { + p.idleNum-- + } + return cn.Close() +} + +// Len returns number of idle connections. +func (p *connPool) Len() int { + defer p.cond.L.Unlock() + p.cond.L.Lock() + return p.idleNum +} + +// Size returns number of connections in the pool. 
+func (p *connPool) Size() int { + defer p.cond.L.Unlock() + p.cond.L.Lock() + return p.conns.Len() +} + +func (p *connPool) Filter(f func(*conn) bool) { + p.cond.L.Lock() + for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next { + next = el.Next() + cn := el.Value.(*conn) + if !f(cn) { + p.remove(cn) + } + } + p.cond.L.Unlock() +} + +func (p *connPool) Close() error { + defer p.cond.L.Unlock() + p.cond.L.Lock() + if p.closed { + return nil + } + p.closed = true + p.rl.Close() + var retErr error + for { + e := p.conns.Front() + if e == nil { + break + } + if err := p.remove(e.Value.(*conn)); err != nil { + log.Printf("cn.Close failed: %s", err) + retErr = err + } + } + return retErr +} + +//------------------------------------------------------------------------------ + +type singleConnPool struct { + pool pool + + cnMtx sync.Mutex + cn *conn + + reusable bool + + closed bool +} + +func newSingleConnPool(pool pool, reusable bool) *singleConnPool { + return &singleConnPool{ + pool: pool, + reusable: reusable, + } +} + +func (p *singleConnPool) SetConn(cn *conn) { + p.cnMtx.Lock() + p.cn = cn + p.cnMtx.Unlock() +} + +func (p *singleConnPool) Get() (*conn, bool, error) { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + + if p.closed { + return nil, false, errClosed + } + if p.cn != nil { + return p.cn, false, nil + } + + cn, isNew, err := p.pool.Get() + if err != nil { + return nil, false, err + } + p.cn = cn + + return p.cn, isNew, nil +} + +func (p *singleConnPool) Put(cn *conn) error { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn != cn { + panic("p.cn != cn") + } + if p.closed { + return errClosed + } + return nil +} + +func (p *singleConnPool) put() error { + err := p.pool.Put(p.cn) + p.cn = nil + return err +} + +func (p *singleConnPool) Remove(cn *conn) error { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn == nil { + panic("p.cn == nil") + } + if p.cn != cn { + panic("p.cn != cn") + } + if p.closed { + return errClosed + } + return 
p.remove() +} + +func (p *singleConnPool) remove() error { + err := p.pool.Remove(p.cn) + p.cn = nil + return err +} + +func (p *singleConnPool) Len() int { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn == nil { + return 0 + } + return 1 +} + +func (p *singleConnPool) Size() int { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn == nil { + return 0 + } + return 1 +} + +func (p *singleConnPool) Filter(f func(*conn) bool) { + p.cnMtx.Lock() + if p.cn != nil { + if !f(p.cn) { + p.remove() + } + } + p.cnMtx.Unlock() +} + +func (p *singleConnPool) Close() error { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.closed { + return nil + } + p.closed = true + var err error + if p.cn != nil { + if p.reusable { + err = p.put() + } else { + err = p.remove() + } + } + return err +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go new file mode 100644 index 000000000..6ac130bac --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go @@ -0,0 +1,134 @@ +package redis + +import ( + "fmt" + "time" +) + +// Not thread-safe. 
+type PubSub struct { + *baseClient +} + +func (c *Client) PubSub() *PubSub { + return &PubSub{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, false), + }, + } +} + +func (c *Client) Publish(channel, message string) *IntCmd { + req := NewIntCmd("PUBLISH", channel, message) + c.Process(req) + return req +} + +type Message struct { + Channel string + Payload string +} + +func (m *Message) String() string { + return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) +} + +type PMessage struct { + Channel string + Pattern string + Payload string +} + +func (m *PMessage) String() string { + return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload) +} + +type Subscription struct { + Kind string + Channel string + Count int +} + +func (m *Subscription) String() string { + return fmt.Sprintf("%s: %s", m.Kind, m.Channel) +} + +func (c *PubSub) Receive() (interface{}, error) { + return c.ReceiveTimeout(0) +} + +func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) { + cn, err := c.conn() + if err != nil { + return nil, err + } + cn.readTimeout = timeout + + cmd := NewSliceCmd() + if err := cmd.parseReply(cn.rd); err != nil { + return nil, err + } + + reply := cmd.Val() + + msgName := reply[0].(string) + switch msgName { + case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": + return &Subscription{ + Kind: msgName, + Channel: reply[1].(string), + Count: int(reply[2].(int64)), + }, nil + case "message": + return &Message{ + Channel: reply[1].(string), + Payload: reply[2].(string), + }, nil + case "pmessage": + return &PMessage{ + Pattern: reply[1].(string), + Channel: reply[2].(string), + Payload: reply[3].(string), + }, nil + } + return nil, fmt.Errorf("redis: unsupported message name: %q", msgName) +} + +func (c *PubSub) subscribe(cmd string, channels ...string) error { + cn, err := c.conn() + if err != nil { + return err + } + + args := append([]string{cmd}, channels...) + req := NewSliceCmd(args...) 
+ return c.writeCmd(cn, req) +} + +func (c *PubSub) Subscribe(channels ...string) error { + return c.subscribe("SUBSCRIBE", channels...) +} + +func (c *PubSub) PSubscribe(patterns ...string) error { + return c.subscribe("PSUBSCRIBE", patterns...) +} + +func (c *PubSub) unsubscribe(cmd string, channels ...string) error { + cn, err := c.conn() + if err != nil { + return err + } + + args := append([]string{cmd}, channels...) + req := NewSliceCmd(args...) + return c.writeCmd(cn, req) +} + +func (c *PubSub) Unsubscribe(channels ...string) error { + return c.unsubscribe("UNSUBSCRIBE", channels...) +} + +func (c *PubSub) PUnsubscribe(patterns ...string) error { + return c.unsubscribe("PUNSUBSCRIBE", patterns...) +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go new file mode 100644 index 000000000..20d851270 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go @@ -0,0 +1,53 @@ +package redis + +import ( + "sync/atomic" + "time" +) + +type rateLimiter struct { + v int64 + + _closed int64 +} + +func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter { + rl := &rateLimiter{ + v: int64(bucketSize), + } + go rl.loop(limit, int64(bucketSize)) + return rl +} + +func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) { + for { + if rl.closed() { + break + } + if v := atomic.LoadInt64(&rl.v); v < bucketSize { + atomic.AddInt64(&rl.v, 1) + } + time.Sleep(limit) + } +} + +func (rl *rateLimiter) Check() bool { + for { + if v := atomic.LoadInt64(&rl.v); v > 0 { + if atomic.CompareAndSwapInt64(&rl.v, v, v-1) { + return true + } + } else { + return false + } + } +} + +func (rl *rateLimiter) Close() error { + atomic.StoreInt64(&rl._closed, 1) + return nil +} + +func (rl *rateLimiter) closed() bool { + return atomic.LoadInt64(&rl._closed) == 1 +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go 
b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go new file mode 100644 index 000000000..2f0d41a2e --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go @@ -0,0 +1,31 @@ +package redis + +import ( + "sync" + "testing" + "time" +) + +func TestRateLimiter(t *testing.T) { + var n = 100000 + if testing.Short() { + n = 1000 + } + rl := newRateLimiter(time.Minute, n) + + wg := &sync.WaitGroup{} + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + if !rl.Check() { + panic("check failed") + } + wg.Done() + }() + } + wg.Wait() + + if rl.Check() && rl.Check() { + t.Fatal("check passed") + } +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go b/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go new file mode 100644 index 000000000..0d15dc8f8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go @@ -0,0 +1,231 @@ +package redis + +import ( + "log" + "net" + "time" +) + +type baseClient struct { + connPool pool + opt *options + cmds []Cmder +} + +func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error { + buf := cn.buf[:0] + for _, cmd := range cmds { + buf = appendArgs(buf, cmd.args()) + } + + _, err := cn.Write(buf) + return err +} + +func (c *baseClient) conn() (*conn, error) { + cn, isNew, err := c.connPool.Get() + if err != nil { + return nil, err + } + + if isNew { + if err := c.initConn(cn); err != nil { + c.removeConn(cn) + return nil, err + } + } + + return cn, nil +} + +func (c *baseClient) initConn(cn *conn) error { + if c.opt.Password == "" && c.opt.DB == 0 { + return nil + } + + pool := newSingleConnPool(c.connPool, false) + pool.SetConn(cn) + + // Client is not closed because we want to reuse underlying connection. 
+ client := &Client{ + baseClient: &baseClient{ + opt: c.opt, + connPool: pool, + }, + } + + if c.opt.Password != "" { + if err := client.Auth(c.opt.Password).Err(); err != nil { + return err + } + } + + if c.opt.DB > 0 { + if err := client.Select(c.opt.DB).Err(); err != nil { + return err + } + } + + return nil +} + +func (c *baseClient) freeConn(cn *conn, ei error) error { + if cn.rd.Buffered() > 0 { + return c.connPool.Remove(cn) + } + if _, ok := ei.(redisError); ok { + return c.connPool.Put(cn) + } + return c.connPool.Remove(cn) +} + +func (c *baseClient) removeConn(cn *conn) { + if err := c.connPool.Remove(cn); err != nil { + log.Printf("pool.Remove failed: %s", err) + } +} + +func (c *baseClient) putConn(cn *conn) { + if err := c.connPool.Put(cn); err != nil { + log.Printf("pool.Put failed: %s", err) + } +} + +func (c *baseClient) Process(cmd Cmder) { + if c.cmds == nil { + c.run(cmd) + } else { + c.cmds = append(c.cmds, cmd) + } +} + +func (c *baseClient) run(cmd Cmder) { + cn, err := c.conn() + if err != nil { + cmd.setErr(err) + return + } + + if timeout := cmd.writeTimeout(); timeout != nil { + cn.writeTimeout = *timeout + } else { + cn.writeTimeout = c.opt.WriteTimeout + } + + if timeout := cmd.readTimeout(); timeout != nil { + cn.readTimeout = *timeout + } else { + cn.readTimeout = c.opt.ReadTimeout + } + + if err := c.writeCmd(cn, cmd); err != nil { + c.freeConn(cn, err) + cmd.setErr(err) + return + } + + if err := cmd.parseReply(cn.rd); err != nil { + c.freeConn(cn, err) + return + } + + c.putConn(cn) +} + +// Close closes the client, releasing any open resources. 
+func (c *baseClient) Close() error { + return c.connPool.Close() +} + +//------------------------------------------------------------------------------ + +type options struct { + Password string + DB int64 + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + IdleTimeout time.Duration +} + +type Options struct { + Network string + Addr string + + // Dialer creates new network connection and has priority over + // Network and Addr options. + Dialer func() (net.Conn, error) + + Password string + DB int64 + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + IdleTimeout time.Duration +} + +func (opt *Options) getPoolSize() int { + if opt.PoolSize == 0 { + return 10 + } + return opt.PoolSize +} + +func (opt *Options) getDialTimeout() time.Duration { + if opt.DialTimeout == 0 { + return 5 * time.Second + } + return opt.DialTimeout +} + +func (opt *Options) options() *options { + return &options{ + DB: opt.DB, + Password: opt.Password, + + DialTimeout: opt.getDialTimeout(), + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.getPoolSize(), + IdleTimeout: opt.IdleTimeout, + } +} + +type Client struct { + *baseClient +} + +func NewClient(clOpt *Options) *Client { + opt := clOpt.options() + dialer := clOpt.Dialer + if dialer == nil { + dialer = func() (net.Conn, error) { + return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout) + } + } + return &Client{ + baseClient: &baseClient{ + opt: opt, + connPool: newConnPool(newConnFunc(dialer), opt), + }, + } +} + +// Deprecated. Use NewClient instead. +func NewTCPClient(opt *Options) *Client { + opt.Network = "tcp" + return NewClient(opt) +} + +// Deprecated. Use NewClient instead. 
+func NewUnixClient(opt *Options) *Client { + opt.Network = "unix" + return NewClient(opt) +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go new file mode 100644 index 000000000..49f84d0e1 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go @@ -0,0 +1,3333 @@ +package redis_test + +import ( + "bytes" + "fmt" + "io" + "net" + "sort" + "strconv" + "sync" + "testing" + "time" + + "gopkg.in/redis.v2" + + . "gopkg.in/check.v1" +) + +const redisAddr = ":6379" + +//------------------------------------------------------------------------------ + +func sortStrings(slice []string) []string { + sort.Strings(slice) + return slice +} + +//------------------------------------------------------------------------------ + +type RedisConnectorTest struct{} + +var _ = Suite(&RedisConnectorTest{}) + +func (t *RedisConnectorTest) TestShutdown(c *C) { + c.Skip("shutdowns server") + + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + shutdown := client.Shutdown() + c.Check(shutdown.Err(), Equals, io.EOF) + c.Check(shutdown.Val(), Equals, "") + + ping := client.Ping() + c.Check(ping.Err(), ErrorMatches, "dial tcp :[0-9]+: connection refused") + c.Check(ping.Val(), Equals, "") +} + +func (t *RedisConnectorTest) TestNewTCPClient(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + ping := client.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestNewUnixClient(c *C) { + c.Skip("not available on Travis CI") + + client := redis.NewUnixClient(&redis.Options{ + Addr: "/tmp/redis.sock", + }) + ping := client.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestDialer(c *C) { + client := redis.NewClient(&redis.Options{ + Dialer: func() (net.Conn, error) { + return 
net.Dial("tcp", redisAddr) + }, + }) + ping := client.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + c.Assert(client.Close(), IsNil) + + ping := client.Ping() + c.Assert(ping.Err(), Not(IsNil)) + c.Assert(ping.Err().Error(), Equals, "redis: client is closed") + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestPubSubClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + pubsub := client.PubSub() + c.Assert(pubsub.Close(), IsNil) + + _, err := pubsub.Receive() + c.Assert(err, Not(IsNil)) + c.Assert(err.Error(), Equals, "redis: client is closed") + + ping := client.Ping() + c.Assert(ping.Err(), IsNil) + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestMultiClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + multi := client.Multi() + c.Assert(multi.Close(), IsNil) + + _, err := multi.Exec(func() error { + multi.Ping() + return nil + }) + c.Assert(err, Not(IsNil)) + c.Assert(err.Error(), Equals, "redis: client is closed") + + ping := client.Ping() + c.Assert(ping.Err(), IsNil) + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestPipelineClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + _, err := client.Pipelined(func(pipeline *redis.Pipeline) error { + c.Assert(pipeline.Close(), IsNil) + pipeline.Ping() + return nil + }) + c.Assert(err, Not(IsNil)) + c.Assert(err.Error(), Equals, "redis: client is closed") + + ping := client.Ping() + c.Assert(ping.Err(), IsNil) + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestIdleTimeout(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + IdleTimeout: time.Nanosecond, + }) + for i := 0; i < 10; i++ { + 
c.Assert(client.Ping().Err(), IsNil) + } +} + +func (t *RedisConnectorTest) TestSelectDb(c *C) { + client1 := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + DB: 1, + }) + c.Assert(client1.Set("key", "db1").Err(), IsNil) + + client2 := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + DB: 2, + }) + c.Assert(client2.Get("key").Err(), Equals, redis.Nil) +} + +//------------------------------------------------------------------------------ + +type RedisConnPoolTest struct { + client *redis.Client +} + +var _ = Suite(&RedisConnPoolTest{}) + +func (t *RedisConnPoolTest) SetUpTest(c *C) { + t.client = redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) +} + +func (t *RedisConnPoolTest) TearDownTest(c *C) { + c.Assert(t.client.FlushDb().Err(), IsNil) + c.Assert(t.client.Close(), IsNil) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSize(c *C) { + wg := &sync.WaitGroup{} + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + ping := t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + wg.Done() + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 10) + c.Assert(t.client.Pool().Len(), Equals, 10) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnPipelineClient(c *C) { + const N = 1000 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + pipeline := t.client.Pipeline() + ping := pipeline.Ping() + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + + c.Assert(pipeline.Close(), IsNil) + + wg.Done() + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 10) + c.Assert(t.client.Pool().Len(), Equals, 10) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnMultiClient(c *C) { + const N = 1000 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + multi := t.client.Multi() + var ping *redis.StatusCmd + cmds, err := multi.Exec(func() 
error { + ping = multi.Ping() + return nil + }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + + c.Assert(multi.Close(), IsNil) + + wg.Done() + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 10) + c.Assert(t.client.Pool().Len(), Equals, 10) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnPubSub(c *C) { + const N = 10 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + pubsub := t.client.PubSub() + c.Assert(pubsub.Subscribe(), IsNil) + c.Assert(pubsub.Close(), IsNil) + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 0) + c.Assert(t.client.Pool().Len(), Equals, 0) +} + +func (t *RedisConnPoolTest) TestConnPoolRemovesBrokenConn(c *C) { + cn, _, err := t.client.Pool().Get() + c.Assert(err, IsNil) + c.Assert(cn.Close(), IsNil) + c.Assert(t.client.Pool().Put(cn), IsNil) + + ping := t.client.Ping() + c.Assert(ping.Err().Error(), Equals, "use of closed network connection") + c.Assert(ping.Val(), Equals, "") + + ping = t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + + c.Assert(t.client.Pool().Size(), Equals, 1) + c.Assert(t.client.Pool().Len(), Equals, 1) +} + +func (t *RedisConnPoolTest) TestConnPoolReusesConn(c *C) { + for i := 0; i < 1000; i++ { + ping := t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + } + + c.Assert(t.client.Pool().Size(), Equals, 1) + c.Assert(t.client.Pool().Len(), Equals, 1) +} + +//------------------------------------------------------------------------------ + +type RedisTest struct { + client *redis.Client +} + +var _ = Suite(&RedisTest{}) + +func Test(t *testing.T) { TestingT(t) } + +func (t *RedisTest) SetUpTest(c *C) { + t.client = redis.NewTCPClient(&redis.Options{ + Addr: ":6379", + }) + + // This is much faster than Flushall. 
+ c.Assert(t.client.Select(1).Err(), IsNil) + c.Assert(t.client.FlushDb().Err(), IsNil) + c.Assert(t.client.Select(0).Err(), IsNil) + c.Assert(t.client.FlushDb().Err(), IsNil) +} + +func (t *RedisTest) TearDownTest(c *C) { + c.Assert(t.client.Close(), IsNil) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdStringMethod(c *C) { + set := t.client.Set("foo", "bar") + c.Assert(set.String(), Equals, "SET foo bar: OK") + + get := t.client.Get("foo") + c.Assert(get.String(), Equals, "GET foo: bar") +} + +func (t *RedisTest) TestCmdStringMethodError(c *C) { + get2 := t.client.Get("key_does_not_exists") + c.Assert(get2.String(), Equals, "GET key_does_not_exists: redis: nil") +} + +func (t *RedisTest) TestRunWithouthCheckingErrVal(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") + + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") +} + +func (t *RedisTest) TestGetSpecChars(c *C) { + set := t.client.Set("key", "hello1\r\nhello2\r\n") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello1\r\nhello2\r\n") +} + +func (t *RedisTest) TestGetBigVal(c *C) { + val := string(bytes.Repeat([]byte{'*'}, 1<<16)) + + set := t.client.Set("key", val) + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, val) +} + +func (t *RedisTest) TestManyKeys(c *C) { + var n = 100000 + + for i := 0; i < n; i++ { + t.client.Set("keys.key"+strconv.Itoa(i), "hello"+strconv.Itoa(i)) + } + keys := t.client.Keys("keys.*") + c.Assert(keys.Err(), IsNil) + c.Assert(len(keys.Val()), Equals, n) +} + +func (t *RedisTest) TestManyKeys2(c *C) { + var n = 100000 + + keys := 
[]string{"non-existent-key"} + for i := 0; i < n; i++ { + key := "keys.key" + strconv.Itoa(i) + t.client.Set(key, "hello"+strconv.Itoa(i)) + keys = append(keys, key) + } + keys = append(keys, "non-existent-key") + + mget := t.client.MGet(keys...) + c.Assert(mget.Err(), IsNil) + c.Assert(len(mget.Val()), Equals, n+2) + vals := mget.Val() + for i := 0; i < n; i++ { + c.Assert(vals[i+1], Equals, "hello"+strconv.Itoa(i)) + } + c.Assert(vals[0], Equals, nil) + c.Assert(vals[n+1], Equals, nil) +} + +func (t *RedisTest) TestStringCmdHelpers(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + + n, err := t.client.Get("key").Int64() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + + un, err := t.client.Get("key").Uint64() + c.Assert(err, IsNil) + c.Assert(un, Equals, uint64(10)) + + f, err := t.client.Get("key").Float64() + c.Assert(err, IsNil) + c.Assert(f, Equals, float64(10)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestAuth(c *C) { + auth := t.client.Auth("password") + c.Assert(auth.Err(), ErrorMatches, "ERR Client sent AUTH, but no password is set") + c.Assert(auth.Val(), Equals, "") +} + +func (t *RedisTest) TestEcho(c *C) { + echo := t.client.Echo("hello") + c.Assert(echo.Err(), IsNil) + c.Assert(echo.Val(), Equals, "hello") +} + +func (t *RedisTest) TestPing(c *C) { + ping := t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") +} + +func (t *RedisTest) TestSelect(c *C) { + sel := t.client.Select(1) + c.Assert(sel.Err(), IsNil) + c.Assert(sel.Val(), Equals, "OK") +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdKeysDel(c *C) { + set := t.client.Set("key1", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + set = t.client.Set("key2", "World") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + del := t.client.Del("key1", "key2", 
"key3") + c.Assert(del.Err(), IsNil) + c.Assert(del.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestCmdKeysDump(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + dump := t.client.Dump("key") + c.Assert(dump.Err(), IsNil) + c.Assert(dump.Val(), Equals, "\x00\x05hello\x06\x00\xf5\x9f\xb7\xf6\x90a\x1c\x99") +} + +func (t *RedisTest) TestCmdKeysExists(c *C) { + set := t.client.Set("key1", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + exists := t.client.Exists("key1") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, true) + + exists = t.client.Exists("key2") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, false) +} + +func (t *RedisTest) TestCmdKeysExpire(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expire := t.client.Expire("key", 10*time.Second) + c.Assert(expire.Err(), IsNil) + c.Assert(expire.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 10*time.Second) + + set = t.client.Set("key", "Hello World") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + ttl = t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val() < 0, Equals, true) +} + +func (t *RedisTest) TestCmdKeysExpireAt(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + exists := t.client.Exists("key") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, true) + + expireAt := t.client.ExpireAt("key", time.Now().Add(-time.Hour)) + c.Assert(expireAt.Err(), IsNil) + c.Assert(expireAt.Val(), Equals, true) + + exists = t.client.Exists("key") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, false) +} + +func (t *RedisTest) TestCmdKeysKeys(c *C) { + mset := t.client.MSet("one", "1", "two", "2", "three", "3", "four", "4") + 
c.Assert(mset.Err(), IsNil) + c.Assert(mset.Val(), Equals, "OK") + + keys := t.client.Keys("*o*") + c.Assert(keys.Err(), IsNil) + c.Assert(sortStrings(keys.Val()), DeepEquals, []string{"four", "one", "two"}) + + keys = t.client.Keys("t??") + c.Assert(keys.Err(), IsNil) + c.Assert(keys.Val(), DeepEquals, []string{"two"}) + + keys = t.client.Keys("*") + c.Assert(keys.Err(), IsNil) + c.Assert( + sortStrings(keys.Val()), + DeepEquals, + []string{"four", "one", "three", "two"}, + ) +} + +func (t *RedisTest) TestCmdKeysMigrate(c *C) { + migrate := t.client.Migrate("localhost", "6380", "key", 0, 0) + c.Assert(migrate.Err(), IsNil) + c.Assert(migrate.Val(), Equals, "NOKEY") + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + migrate = t.client.Migrate("localhost", "6380", "key", 0, 0) + c.Assert(migrate.Err(), ErrorMatches, "IOERR error or timeout writing to target instance") + c.Assert(migrate.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdKeysMove(c *C) { + move := t.client.Move("key", 1) + c.Assert(move.Err(), IsNil) + c.Assert(move.Val(), Equals, false) + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + move = t.client.Move("key", 1) + c.Assert(move.Err(), IsNil) + c.Assert(move.Val(), Equals, true) + + get := t.client.Get("key") + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") + + sel := t.client.Select(1) + c.Assert(sel.Err(), IsNil) + c.Assert(sel.Val(), Equals, "OK") + + get = t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysObject(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + refCount := t.client.ObjectRefCount("key") + c.Assert(refCount.Err(), IsNil) + c.Assert(refCount.Val(), Equals, int64(1)) + + enc := t.client.ObjectEncoding("key") + c.Assert(enc.Err(), IsNil) + 
c.Assert(enc.Val(), Equals, "raw") + + idleTime := t.client.ObjectIdleTime("key") + c.Assert(idleTime.Err(), IsNil) + c.Assert(idleTime.Val(), Equals, time.Duration(0)) +} + +func (t *RedisTest) TestCmdKeysPersist(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expire := t.client.Expire("key", 10*time.Second) + c.Assert(expire.Err(), IsNil) + c.Assert(expire.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 10*time.Second) + + persist := t.client.Persist("key") + c.Assert(persist.Err(), IsNil) + c.Assert(persist.Val(), Equals, true) + + ttl = t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val() < 0, Equals, true) +} + +func (t *RedisTest) TestCmdKeysPExpire(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expiration := 900 * time.Millisecond + pexpire := t.client.PExpire("key", expiration) + c.Assert(pexpire.Err(), IsNil) + c.Assert(pexpire.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, time.Second) + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) +} + +func (t *RedisTest) TestCmdKeysPExpireAt(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expiration := 900 * time.Millisecond + pexpireat := t.client.PExpireAt("key", time.Now().Add(expiration)) + c.Assert(pexpireat.Err(), IsNil) + c.Assert(pexpireat.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, time.Second) + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) 
+} + +func (t *RedisTest) TestCmdKeysPTTL(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expiration := time.Second + expire := t.client.Expire("key", expiration) + c.Assert(expire.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) +} + +func (t *RedisTest) TestCmdKeysRandomKey(c *C) { + randomKey := t.client.RandomKey() + c.Assert(randomKey.Err(), Equals, redis.Nil) + c.Assert(randomKey.Val(), Equals, "") + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + randomKey = t.client.RandomKey() + c.Assert(randomKey.Err(), IsNil) + c.Assert(randomKey.Val(), Equals, "key") +} + +func (t *RedisTest) TestCmdKeysRename(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + status := t.client.Rename("key", "key1") + c.Assert(status.Err(), IsNil) + c.Assert(status.Val(), Equals, "OK") + + get := t.client.Get("key1") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysRenameNX(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + renameNX := t.client.RenameNX("key", "key1") + c.Assert(renameNX.Err(), IsNil) + c.Assert(renameNX.Val(), Equals, true) + + get := t.client.Get("key1") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysRestore(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + dump := t.client.Dump("key") + c.Assert(dump.Err(), IsNil) + + del := t.client.Del("key") + c.Assert(del.Err(), IsNil) + + restore := t.client.Restore("key", 0, dump.Val()) + c.Assert(restore.Err(), IsNil) + 
c.Assert(restore.Val(), Equals, "OK") + + type_ := t.client.Type("key") + c.Assert(type_.Err(), IsNil) + c.Assert(type_.Val(), Equals, "string") + + lRange := t.client.Get("key") + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysSort(c *C) { + lPush := t.client.LPush("list", "1") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(1)) + lPush = t.client.LPush("list", "3") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(2)) + lPush = t.client.LPush("list", "2") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(3)) + + sort := t.client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}) + c.Assert(sort.Err(), IsNil) + c.Assert(sort.Val(), DeepEquals, []string{"1", "2"}) +} + +func (t *RedisTest) TestCmdKeysSortBy(c *C) { + lPush := t.client.LPush("list", "1") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(1)) + lPush = t.client.LPush("list", "3") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(2)) + lPush = t.client.LPush("list", "2") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(3)) + + set := t.client.Set("weight_1", "5") + c.Assert(set.Err(), IsNil) + set = t.client.Set("weight_2", "2") + c.Assert(set.Err(), IsNil) + set = t.client.Set("weight_3", "8") + c.Assert(set.Err(), IsNil) + + sort := t.client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC", By: "weight_*"}) + c.Assert(sort.Err(), IsNil) + c.Assert(sort.Val(), DeepEquals, []string{"2", "1"}) +} + +func (t *RedisTest) TestCmdKeysTTL(c *C) { + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val() < 0, Equals, true) + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expire := t.client.Expire("key", 60*time.Second) + c.Assert(expire.Err(), IsNil) + c.Assert(expire.Val(), Equals, true) + + ttl = t.client.TTL("key") + 
c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 60*time.Second) +} + +func (t *RedisTest) TestCmdKeysType(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + type_ := t.client.Type("key") + c.Assert(type_.Err(), IsNil) + c.Assert(type_.Val(), Equals, "string") +} + +func (t *RedisTest) TestCmdScan(c *C) { + for i := 0; i < 1000; i++ { + set := t.client.Set(fmt.Sprintf("key%d", i), "hello") + c.Assert(set.Err(), IsNil) + } + + cursor, keys, err := t.client.Scan(0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdSScan(c *C) { + for i := 0; i < 1000; i++ { + sadd := t.client.SAdd("myset", fmt.Sprintf("member%d", i)) + c.Assert(sadd.Err(), IsNil) + } + + cursor, keys, err := t.client.SScan("myset", 0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdHScan(c *C) { + for i := 0; i < 1000; i++ { + sadd := t.client.HSet("myhash", fmt.Sprintf("key%d", i), "hello") + c.Assert(sadd.Err(), IsNil) + } + + cursor, keys, err := t.client.HScan("myhash", 0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdZScan(c *C) { + for i := 0; i < 1000; i++ { + sadd := t.client.ZAdd("myset", redis.Z{float64(i), fmt.Sprintf("member%d", i)}) + c.Assert(sadd.Err(), IsNil) + } + + cursor, keys, err := t.client.ZScan("myset", 0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestStringsAppend(c *C) { + exists := t.client.Exists("key") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, false) + + append := t.client.Append("key", "Hello") + 
c.Assert(append.Err(), IsNil) + c.Assert(append.Val(), Equals, int64(5)) + + append = t.client.Append("key", " World") + c.Assert(append.Err(), IsNil) + c.Assert(append.Val(), Equals, int64(11)) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "Hello World") +} + +func (t *RedisTest) TestStringsBitCount(c *C) { + set := t.client.Set("key", "foobar") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitCount := t.client.BitCount("key", nil) + c.Assert(bitCount.Err(), IsNil) + c.Assert(bitCount.Val(), Equals, int64(26)) + + bitCount = t.client.BitCount("key", &redis.BitCount{0, 0}) + c.Assert(bitCount.Err(), IsNil) + c.Assert(bitCount.Val(), Equals, int64(4)) + + bitCount = t.client.BitCount("key", &redis.BitCount{1, 1}) + c.Assert(bitCount.Err(), IsNil) + c.Assert(bitCount.Val(), Equals, int64(6)) +} + +func (t *RedisTest) TestStringsBitOpAnd(c *C) { + set := t.client.Set("key1", "1") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + set = t.client.Set("key2", "0") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpAnd := t.client.BitOpAnd("dest", "key1", "key2") + c.Assert(bitOpAnd.Err(), IsNil) + c.Assert(bitOpAnd.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "0") +} + +func (t *RedisTest) TestStringsBitOpOr(c *C) { + set := t.client.Set("key1", "1") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + set = t.client.Set("key2", "0") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpOr := t.client.BitOpOr("dest", "key1", "key2") + c.Assert(bitOpOr.Err(), IsNil) + c.Assert(bitOpOr.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "1") +} + +func (t *RedisTest) TestStringsBitOpXor(c *C) { + set := t.client.Set("key1", "\xff") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + 
set = t.client.Set("key2", "\x0f") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpXor := t.client.BitOpXor("dest", "key1", "key2") + c.Assert(bitOpXor.Err(), IsNil) + c.Assert(bitOpXor.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "\xf0") +} + +func (t *RedisTest) TestStringsBitOpNot(c *C) { + set := t.client.Set("key1", "\x00") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpNot := t.client.BitOpNot("dest", "key1") + c.Assert(bitOpNot.Err(), IsNil) + c.Assert(bitOpNot.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "\xff") +} + +func (t *RedisTest) TestStringsDecr(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + decr := t.client.Decr("key") + c.Assert(decr.Err(), IsNil) + c.Assert(decr.Val(), Equals, int64(9)) + + set = t.client.Set("key", "234293482390480948029348230948") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + decr = t.client.Decr("key") + c.Assert(decr.Err(), ErrorMatches, "ERR value is not an integer or out of range") + c.Assert(decr.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestStringsDecrBy(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + decrBy := t.client.DecrBy("key", 5) + c.Assert(decrBy.Err(), IsNil) + c.Assert(decrBy.Val(), Equals, int64(5)) +} + +func (t *RedisTest) TestStringsGet(c *C) { + get := t.client.Get("_") + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get = t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestStringsGetBit(c *C) { + setBit := t.client.SetBit("key", 7, 1) + c.Assert(setBit.Err(), 
IsNil) + c.Assert(setBit.Val(), Equals, int64(0)) + + getBit := t.client.GetBit("key", 0) + c.Assert(getBit.Err(), IsNil) + c.Assert(getBit.Val(), Equals, int64(0)) + + getBit = t.client.GetBit("key", 7) + c.Assert(getBit.Err(), IsNil) + c.Assert(getBit.Val(), Equals, int64(1)) + + getBit = t.client.GetBit("key", 100) + c.Assert(getBit.Err(), IsNil) + c.Assert(getBit.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestStringsGetRange(c *C) { + set := t.client.Set("key", "This is a string") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + getRange := t.client.GetRange("key", 0, 3) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "This") + + getRange = t.client.GetRange("key", -3, -1) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "ing") + + getRange = t.client.GetRange("key", 0, -1) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "This is a string") + + getRange = t.client.GetRange("key", 10, 100) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "string") +} + +func (t *RedisTest) TestStringsGetSet(c *C) { + incr := t.client.Incr("key") + c.Assert(incr.Err(), IsNil) + c.Assert(incr.Val(), Equals, int64(1)) + + getSet := t.client.GetSet("key", "0") + c.Assert(getSet.Err(), IsNil) + c.Assert(getSet.Val(), Equals, "1") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "0") +} + +func (t *RedisTest) TestStringsIncr(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incr := t.client.Incr("key") + c.Assert(incr.Err(), IsNil) + c.Assert(incr.Val(), Equals, int64(11)) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "11") +} + +func (t *RedisTest) TestStringsIncrBy(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incrBy := t.client.IncrBy("key", 5) + 
c.Assert(incrBy.Err(), IsNil) + c.Assert(incrBy.Val(), Equals, int64(15)) +} + +func (t *RedisTest) TestIncrByFloat(c *C) { + set := t.client.Set("key", "10.50") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incrByFloat := t.client.IncrByFloat("key", 0.1) + c.Assert(incrByFloat.Err(), IsNil) + c.Assert(incrByFloat.Val(), Equals, 10.6) + + set = t.client.Set("key", "5.0e3") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incrByFloat = t.client.IncrByFloat("key", 2.0e2) + c.Assert(incrByFloat.Err(), IsNil) + c.Assert(incrByFloat.Val(), Equals, float64(5200)) +} + +func (t *RedisTest) TestIncrByFloatOverflow(c *C) { + incrByFloat := t.client.IncrByFloat("key", 996945661) + c.Assert(incrByFloat.Err(), IsNil) + c.Assert(incrByFloat.Val(), Equals, float64(996945661)) +} + +func (t *RedisTest) TestStringsMSetMGet(c *C) { + mSet := t.client.MSet("key1", "hello1", "key2", "hello2") + c.Assert(mSet.Err(), IsNil) + c.Assert(mSet.Val(), Equals, "OK") + + mGet := t.client.MGet("key1", "key2", "_") + c.Assert(mGet.Err(), IsNil) + c.Assert(mGet.Val(), DeepEquals, []interface{}{"hello1", "hello2", nil}) +} + +func (t *RedisTest) TestStringsMSetNX(c *C) { + mSetNX := t.client.MSetNX("key1", "hello1", "key2", "hello2") + c.Assert(mSetNX.Err(), IsNil) + c.Assert(mSetNX.Val(), Equals, true) + + mSetNX = t.client.MSetNX("key2", "hello1", "key3", "hello2") + c.Assert(mSetNX.Err(), IsNil) + c.Assert(mSetNX.Val(), Equals, false) +} + +func (t *RedisTest) TestStringsPSetEx(c *C) { + expiration := 50 * time.Millisecond + psetex := t.client.PSetEx("key", expiration, "hello") + c.Assert(psetex.Err(), IsNil) + c.Assert(psetex.Val(), Equals, "OK") + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) 
TestStringsSetGet(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestStringsSetEx(c *C) { + setEx := t.client.SetEx("key", 10*time.Second, "hello") + c.Assert(setEx.Err(), IsNil) + c.Assert(setEx.Val(), Equals, "OK") + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 10*time.Second) +} + +func (t *RedisTest) TestStringsSetNX(c *C) { + setNX := t.client.SetNX("key", "hello") + c.Assert(setNX.Err(), IsNil) + c.Assert(setNX.Val(), Equals, true) + + setNX = t.client.SetNX("key", "hello2") + c.Assert(setNX.Err(), IsNil) + c.Assert(setNX.Val(), Equals, false) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestStringsSetRange(c *C) { + set := t.client.Set("key", "Hello World") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + range_ := t.client.SetRange("key", 6, "Redis") + c.Assert(range_.Err(), IsNil) + c.Assert(range_.Val(), Equals, int64(11)) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "Hello Redis") +} + +func (t *RedisTest) TestStringsStrLen(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + strLen := t.client.StrLen("key") + c.Assert(strLen.Err(), IsNil) + c.Assert(strLen.Val(), Equals, int64(5)) + + strLen = t.client.StrLen("_") + c.Assert(strLen.Err(), IsNil) + c.Assert(strLen.Val(), Equals, int64(0)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdHDel(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + + hDel := t.client.HDel("hash", "key") + c.Assert(hDel.Err(), IsNil) + c.Assert(hDel.Val(), Equals, int64(1)) + + hDel = 
t.client.HDel("hash", "key") + c.Assert(hDel.Err(), IsNil) + c.Assert(hDel.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestCmdHExists(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + + hExists := t.client.HExists("hash", "key") + c.Assert(hExists.Err(), IsNil) + c.Assert(hExists.Val(), Equals, true) + + hExists = t.client.HExists("hash", "key1") + c.Assert(hExists.Err(), IsNil) + c.Assert(hExists.Val(), Equals, false) +} + +func (t *RedisTest) TestCmdHGet(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + + hGet := t.client.HGet("hash", "key") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello") + + hGet = t.client.HGet("hash", "key1") + c.Assert(hGet.Err(), Equals, redis.Nil) + c.Assert(hGet.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdHGetAll(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hGetAll := t.client.HGetAll("hash") + c.Assert(hGetAll.Err(), IsNil) + c.Assert(hGetAll.Val(), DeepEquals, []string{"key1", "hello1", "key2", "hello2"}) +} + +func (t *RedisTest) TestCmdHGetAllMap(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hGetAll := t.client.HGetAllMap("hash") + c.Assert(hGetAll.Err(), IsNil) + c.Assert(hGetAll.Val(), DeepEquals, map[string]string{"key1": "hello1", "key2": "hello2"}) +} + +func (t *RedisTest) TestCmdHIncrBy(c *C) { + hSet := t.client.HSet("hash", "key", "5") + c.Assert(hSet.Err(), IsNil) + + hIncrBy := t.client.HIncrBy("hash", "key", 1) + c.Assert(hIncrBy.Err(), IsNil) + c.Assert(hIncrBy.Val(), Equals, int64(6)) + + hIncrBy = t.client.HIncrBy("hash", "key", -1) + c.Assert(hIncrBy.Err(), IsNil) + c.Assert(hIncrBy.Val(), Equals, int64(5)) + + hIncrBy = t.client.HIncrBy("hash", "key", -10) + 
c.Assert(hIncrBy.Err(), IsNil) + c.Assert(hIncrBy.Val(), Equals, int64(-5)) +} + +func (t *RedisTest) TestCmdHIncrByFloat(c *C) { + hSet := t.client.HSet("hash", "field", "10.50") + c.Assert(hSet.Err(), IsNil) + c.Assert(hSet.Val(), Equals, true) + + hIncrByFloat := t.client.HIncrByFloat("hash", "field", 0.1) + c.Assert(hIncrByFloat.Err(), IsNil) + c.Assert(hIncrByFloat.Val(), Equals, 10.6) + + hSet = t.client.HSet("hash", "field", "5.0e3") + c.Assert(hSet.Err(), IsNil) + c.Assert(hSet.Val(), Equals, false) + + hIncrByFloat = t.client.HIncrByFloat("hash", "field", 2.0e2) + c.Assert(hIncrByFloat.Err(), IsNil) + c.Assert(hIncrByFloat.Val(), Equals, float64(5200)) +} + +func (t *RedisTest) TestCmdHKeys(c *C) { + hkeys := t.client.HKeys("hash") + c.Assert(hkeys.Err(), IsNil) + c.Assert(hkeys.Val(), DeepEquals, []string{}) + + hset := t.client.HSet("hash", "key1", "hello1") + c.Assert(hset.Err(), IsNil) + hset = t.client.HSet("hash", "key2", "hello2") + c.Assert(hset.Err(), IsNil) + + hkeys = t.client.HKeys("hash") + c.Assert(hkeys.Err(), IsNil) + c.Assert(hkeys.Val(), DeepEquals, []string{"key1", "key2"}) +} + +func (t *RedisTest) TestCmdHLen(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hLen := t.client.HLen("hash") + c.Assert(hLen.Err(), IsNil) + c.Assert(hLen.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestCmdHMGet(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hMGet := t.client.HMGet("hash", "key1", "key2", "_") + c.Assert(hMGet.Err(), IsNil) + c.Assert(hMGet.Val(), DeepEquals, []interface{}{"hello1", "hello2", nil}) +} + +func (t *RedisTest) TestCmdHMSet(c *C) { + hMSet := t.client.HMSet("hash", "key1", "hello1", "key2", "hello2") + c.Assert(hMSet.Err(), IsNil) + c.Assert(hMSet.Val(), Equals, "OK") + + hGet := 
t.client.HGet("hash", "key1") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello1") + + hGet = t.client.HGet("hash", "key2") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello2") +} + +func (t *RedisTest) TestCmdHSet(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + c.Assert(hSet.Val(), Equals, true) + + hGet := t.client.HGet("hash", "key") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdHSetNX(c *C) { + hSetNX := t.client.HSetNX("hash", "key", "hello") + c.Assert(hSetNX.Err(), IsNil) + c.Assert(hSetNX.Val(), Equals, true) + + hSetNX = t.client.HSetNX("hash", "key", "hello") + c.Assert(hSetNX.Err(), IsNil) + c.Assert(hSetNX.Val(), Equals, false) + + hGet := t.client.HGet("hash", "key") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdHVals(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hVals := t.client.HVals("hash") + c.Assert(hVals.Err(), IsNil) + c.Assert(hVals.Val(), DeepEquals, []string{"hello1", "hello2"}) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdListsBLPop(c *C) { + rPush := t.client.RPush("list1", "a", "b", "c") + c.Assert(rPush.Err(), IsNil) + + bLPop := t.client.BLPop(0, "list1", "list2") + c.Assert(bLPop.Err(), IsNil) + c.Assert(bLPop.Val(), DeepEquals, []string{"list1", "a"}) +} + +func (t *RedisTest) TestCmdListsBLPopBlocks(c *C) { + started := make(chan bool) + done := make(chan bool) + go func() { + started <- true + bLPop := t.client.BLPop(0, "list") + c.Assert(bLPop.Err(), IsNil) + c.Assert(bLPop.Val(), DeepEquals, []string{"list", "a"}) + done <- true + }() + <-started + + select { + case <-done: + c.Error("BLPop is not blocked") + case <-time.After(time.Second): + // ok + 
} + + rPush := t.client.RPush("list", "a") + c.Assert(rPush.Err(), IsNil) + + select { + case <-done: + // ok + case <-time.After(time.Second): + c.Error("BLPop is still blocked") + // ok + } +} + +func (t *RedisTest) TestCmdListsBLPopTimeout(c *C) { + bLPop := t.client.BLPop(1, "list1") + c.Assert(bLPop.Err(), Equals, redis.Nil) + c.Assert(bLPop.Val(), IsNil) +} + +func (t *RedisTest) TestCmdListsBRPop(c *C) { + rPush := t.client.RPush("list1", "a", "b", "c") + c.Assert(rPush.Err(), IsNil) + + bRPop := t.client.BRPop(0, "list1", "list2") + c.Assert(bRPop.Err(), IsNil) + c.Assert(bRPop.Val(), DeepEquals, []string{"list1", "c"}) +} + +func (t *RedisTest) TestCmdListsBRPopBlocks(c *C) { + started := make(chan bool) + done := make(chan bool) + go func() { + started <- true + brpop := t.client.BRPop(0, "list") + c.Assert(brpop.Err(), IsNil) + c.Assert(brpop.Val(), DeepEquals, []string{"list", "a"}) + done <- true + }() + <-started + + select { + case <-done: + c.Error("BRPop is not blocked") + case <-time.After(time.Second): + // ok + } + + rPush := t.client.RPush("list", "a") + c.Assert(rPush.Err(), IsNil) + + select { + case <-done: + // ok + case <-time.After(time.Second): + c.Error("BRPop is still blocked") + // ok + } +} + +func (t *RedisTest) TestCmdListsBRPopLPush(c *C) { + rPush := t.client.RPush("list1", "a", "b", "c") + c.Assert(rPush.Err(), IsNil) + + bRPopLPush := t.client.BRPopLPush("list1", "list2", 0) + c.Assert(bRPopLPush.Err(), IsNil) + c.Assert(bRPopLPush.Val(), Equals, "c") +} + +func (t *RedisTest) TestCmdListsLIndex(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + lPush = t.client.LPush("list", "Hello") + c.Assert(lPush.Err(), IsNil) + + lIndex := t.client.LIndex("list", 0) + c.Assert(lIndex.Err(), IsNil) + c.Assert(lIndex.Val(), Equals, "Hello") + + lIndex = t.client.LIndex("list", -1) + c.Assert(lIndex.Err(), IsNil) + c.Assert(lIndex.Val(), Equals, "World") + + lIndex = t.client.LIndex("list", 3) + 
c.Assert(lIndex.Err(), Equals, redis.Nil) + c.Assert(lIndex.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdListsLInsert(c *C) { + rPush := t.client.RPush("list", "Hello") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "World") + c.Assert(rPush.Err(), IsNil) + + lInsert := t.client.LInsert("list", "BEFORE", "World", "There") + c.Assert(lInsert.Err(), IsNil) + c.Assert(lInsert.Val(), Equals, int64(3)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "There", "World"}) +} + +func (t *RedisTest) TestCmdListsLLen(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + lPush = t.client.LPush("list", "Hello") + c.Assert(lPush.Err(), IsNil) + + lLen := t.client.LLen("list") + c.Assert(lLen.Err(), IsNil) + c.Assert(lLen.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestCmdListsLPop(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lPop := t.client.LPop("list") + c.Assert(lPop.Err(), IsNil) + c.Assert(lPop.Val(), Equals, "one") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"two", "three"}) +} + +func (t *RedisTest) TestCmdListsLPush(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + lPush = t.client.LPush("list", "Hello") + c.Assert(lPush.Err(), IsNil) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestCmdListsLPushX(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + + lPushX := t.client.LPushX("list", "Hello") + c.Assert(lPushX.Err(), IsNil) + c.Assert(lPushX.Val(), Equals, int64(2)) + + lPushX = 
t.client.LPushX("list2", "Hello") + c.Assert(lPushX.Err(), IsNil) + c.Assert(lPushX.Val(), Equals, int64(0)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) + + lRange = t.client.LRange("list2", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{}) +} + +func (t *RedisTest) TestCmdListsLRange(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lRange := t.client.LRange("list", 0, 0) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one"}) + + lRange = t.client.LRange("list", -3, 2) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one", "two", "three"}) + + lRange = t.client.LRange("list", -100, 100) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one", "two", "three"}) + + lRange = t.client.LRange("list", 5, 10) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{}) +} + +func (t *RedisTest) TestCmdListsLRem(c *C) { + rPush := t.client.RPush("list", "hello") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "hello") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "key") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "hello") + c.Assert(rPush.Err(), IsNil) + + lRem := t.client.LRem("list", -2, "hello") + c.Assert(lRem.Err(), IsNil) + c.Assert(lRem.Val(), Equals, int64(2)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"hello", "key"}) +} + +func (t *RedisTest) TestCmdListsLSet(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush 
= t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lSet := t.client.LSet("list", 0, "four") + c.Assert(lSet.Err(), IsNil) + c.Assert(lSet.Val(), Equals, "OK") + + lSet = t.client.LSet("list", -2, "five") + c.Assert(lSet.Err(), IsNil) + c.Assert(lSet.Val(), Equals, "OK") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"four", "five", "three"}) +} + +func (t *RedisTest) TestCmdListsLTrim(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lTrim := t.client.LTrim("list", 1, -1) + c.Assert(lTrim.Err(), IsNil) + c.Assert(lTrim.Val(), Equals, "OK") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"two", "three"}) +} + +func (t *RedisTest) TestCmdListsRPop(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + rPop := t.client.RPop("list") + c.Assert(rPop.Err(), IsNil) + c.Assert(rPop.Val(), Equals, "three") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one", "two"}) +} + +func (t *RedisTest) TestCmdListsRPopLPush(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + rPopLPush := t.client.RPopLPush("list", "list2") + c.Assert(rPopLPush.Err(), IsNil) + c.Assert(rPopLPush.Val(), Equals, "three") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, 
[]string{"one", "two"}) + + lRange = t.client.LRange("list2", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"three"}) +} + +func (t *RedisTest) TestCmdListsRPush(c *C) { + rPush := t.client.RPush("list", "Hello") + c.Assert(rPush.Err(), IsNil) + c.Assert(rPush.Val(), Equals, int64(1)) + + rPush = t.client.RPush("list", "World") + c.Assert(rPush.Err(), IsNil) + c.Assert(rPush.Val(), Equals, int64(2)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestCmdListsRPushX(c *C) { + rPush := t.client.RPush("list", "Hello") + c.Assert(rPush.Err(), IsNil) + c.Assert(rPush.Val(), Equals, int64(1)) + + rPushX := t.client.RPushX("list", "World") + c.Assert(rPushX.Err(), IsNil) + c.Assert(rPushX.Val(), Equals, int64(2)) + + rPushX = t.client.RPushX("list2", "World") + c.Assert(rPushX.Err(), IsNil) + c.Assert(rPushX.Val(), Equals, int64(0)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) + + lRange = t.client.LRange("list2", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{}) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestSAdd(c *C) { + sAdd := t.client.SAdd("set", "Hello") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(1)) + + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(1)) + + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(0)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestSCard(c *C) { + sAdd := t.client.SAdd("set", "Hello") + c.Assert(sAdd.Err(), IsNil) + 
c.Assert(sAdd.Val(), Equals, int64(1)) + + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(1)) + + sCard := t.client.SCard("set") + c.Assert(sCard.Err(), IsNil) + c.Assert(sCard.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestSDiff(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sDiff := t.client.SDiff("set1", "set2") + c.Assert(sDiff.Err(), IsNil) + c.Assert(sortStrings(sDiff.Val()), DeepEquals, []string{"a", "b"}) +} + +func (t *RedisTest) TestSDiffStore(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sDiffStore := t.client.SDiffStore("set", "set1", "set2") + c.Assert(sDiffStore.Err(), IsNil) + c.Assert(sDiffStore.Val(), Equals, int64(2)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"a", "b"}) +} + +func (t *RedisTest) TestSInter(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") 
+ c.Assert(sAdd.Err(), IsNil) + + sInter := t.client.SInter("set1", "set2") + c.Assert(sInter.Err(), IsNil) + c.Assert(sInter.Val(), DeepEquals, []string{"c"}) +} + +func (t *RedisTest) TestSInterStore(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sInterStore := t.client.SInterStore("set", "set1", "set2") + c.Assert(sInterStore.Err(), IsNil) + c.Assert(sInterStore.Val(), Equals, int64(1)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), DeepEquals, []string{"c"}) +} + +func (t *RedisTest) TestIsMember(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + + sIsMember := t.client.SIsMember("set", "one") + c.Assert(sIsMember.Err(), IsNil) + c.Assert(sIsMember.Val(), Equals, true) + + sIsMember = t.client.SIsMember("set", "two") + c.Assert(sIsMember.Err(), IsNil) + c.Assert(sIsMember.Val(), Equals, false) +} + +func (t *RedisTest) TestSMembers(c *C) { + sAdd := t.client.SAdd("set", "Hello") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestSMove(c *C) { + sAdd := t.client.SAdd("set1", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "two") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "three") + c.Assert(sAdd.Err(), IsNil) + + sMove := t.client.SMove("set1", "set2", "two") + c.Assert(sMove.Err(), IsNil) + c.Assert(sMove.Val(), Equals, true) + + sMembers := 
t.client.SMembers("set1") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), DeepEquals, []string{"one"}) + + sMembers = t.client.SMembers("set2") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"three", "two"}) +} + +func (t *RedisTest) TestSPop(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "two") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "three") + c.Assert(sAdd.Err(), IsNil) + + sPop := t.client.SPop("set") + c.Assert(sPop.Err(), IsNil) + c.Assert(sPop.Val(), Not(Equals), "") + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), HasLen, 2) +} + +func (t *RedisTest) TestSRandMember(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "two") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "three") + c.Assert(sAdd.Err(), IsNil) + + sRandMember := t.client.SRandMember("set") + c.Assert(sRandMember.Err(), IsNil) + c.Assert(sRandMember.Val(), Not(Equals), "") + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), HasLen, 3) +} + +func (t *RedisTest) TestSRem(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "two") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "three") + c.Assert(sAdd.Err(), IsNil) + + sRem := t.client.SRem("set", "one") + c.Assert(sRem.Err(), IsNil) + c.Assert(sRem.Val(), Equals, int64(1)) + + sRem = t.client.SRem("set", "four") + c.Assert(sRem.Err(), IsNil) + c.Assert(sRem.Val(), Equals, int64(0)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert( + sortStrings(sMembers.Val()), + DeepEquals, + []string{"three", "two"}, + ) +} + +func (t *RedisTest) TestSUnion(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = 
t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sUnion := t.client.SUnion("set1", "set2") + c.Assert(sUnion.Err(), IsNil) + c.Assert(sUnion.Val(), HasLen, 5) +} + +func (t *RedisTest) TestSUnionStore(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sUnionStore := t.client.SUnionStore("set", "set1", "set2") + c.Assert(sUnionStore.Err(), IsNil) + c.Assert(sUnionStore.Val(), Equals, int64(5)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), HasLen, 5) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestZAdd(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(1)) + + zAdd = t.client.ZAdd("zset", redis.Z{1, "uno"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(1)) + + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(1)) + + zAdd = t.client.ZAdd("zset", redis.Z{3, "two"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(0)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {1, "uno"}, {3, "two"}}) +} + +func (t *RedisTest) TestZCard(c *C) { + zAdd := 
t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zCard := t.client.ZCard("zset") + c.Assert(zCard.Err(), IsNil) + c.Assert(zCard.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestZCount(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zCount := t.client.ZCount("zset", "-inf", "+inf") + c.Assert(zCount.Err(), IsNil) + c.Assert(zCount.Val(), Equals, int64(3)) + + zCount = t.client.ZCount("zset", "(1", "3") + c.Assert(zCount.Err(), IsNil) + c.Assert(zCount.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestZIncrBy(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zIncrBy := t.client.ZIncrBy("zset", 2, "one") + c.Assert(zIncrBy.Err(), IsNil) + c.Assert(zIncrBy.Val(), Equals, float64(3)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "one"}}) +} + +func (t *RedisTest) TestZInterStore(c *C) { + zAdd := t.client.ZAdd("zset1", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset1", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zAdd = t.client.ZAdd("zset2", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset2", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset3", redis.Z{3, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zInterStore := t.client.ZInterStore( + "out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2") + c.Assert(zInterStore.Err(), IsNil) + c.Assert(zInterStore.Val(), Equals, int64(2)) + + val, err := t.client.ZRangeWithScores("out", 0, 
-1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{5, "one"}, {10, "two"}}) +} + +func (t *RedisTest) TestZRange(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRange := t.client.ZRange("zset", 0, -1) + c.Assert(zRange.Err(), IsNil) + c.Assert(zRange.Val(), DeepEquals, []string{"one", "two", "three"}) + + zRange = t.client.ZRange("zset", 2, 3) + c.Assert(zRange.Err(), IsNil) + c.Assert(zRange.Val(), DeepEquals, []string{"three"}) + + zRange = t.client.ZRange("zset", -2, -1) + c.Assert(zRange.Err(), IsNil) + c.Assert(zRange.Val(), DeepEquals, []string{"two", "three"}) +} + +func (t *RedisTest) TestZRangeWithScores(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}, {3, "three"}}) + + val, err = t.client.ZRangeWithScores("zset", 2, 3).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}}) + + val, err = t.client.ZRangeWithScores("zset", -2, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "three"}}) +} + +func (t *RedisTest) TestZRangeByScore(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRangeByScore := t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "-inf", + Max: "+inf", + }) + c.Assert(zRangeByScore.Err(), 
IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{"one", "two", "three"}) + + zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "1", + Max: "2", + }) + c.Assert(zRangeByScore.Err(), IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{"one", "two"}) + + zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "2", + }) + c.Assert(zRangeByScore.Err(), IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{"two"}) + + zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "(2", + }) + c.Assert(zRangeByScore.Err(), IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{}) +} + +func (t *RedisTest) TestZRangeByScoreWithScoresMap(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "-inf", + Max: "+inf", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}, {3, "three"}}) + + val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "1", + Max: "2", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}}) + + val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "2", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}}) + + val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "(2", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{}) +} + +func (t *RedisTest) TestZRank(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) 
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRank := t.client.ZRank("zset", "three") + c.Assert(zRank.Err(), IsNil) + c.Assert(zRank.Val(), Equals, int64(2)) + + zRank = t.client.ZRank("zset", "four") + c.Assert(zRank.Err(), Equals, redis.Nil) + c.Assert(zRank.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestZRem(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRem := t.client.ZRem("zset", "two") + c.Assert(zRem.Err(), IsNil) + c.Assert(zRem.Val(), Equals, int64(1)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {3, "three"}}) +} + +func (t *RedisTest) TestZRemRangeByRank(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRemRangeByRank := t.client.ZRemRangeByRank("zset", 0, 1) + c.Assert(zRemRangeByRank.Err(), IsNil) + c.Assert(zRemRangeByRank.Val(), Equals, int64(2)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}}) +} + +func (t *RedisTest) TestZRemRangeByScore(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRemRangeByScore := t.client.ZRemRangeByScore("zset", "-inf", "(2") + c.Assert(zRemRangeByScore.Err(), IsNil) + c.Assert(zRemRangeByScore.Val(), Equals, int64(1)) + + val, err := t.client.ZRangeWithScores("zset", 0, 
-1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "three"}}) +} + +func (t *RedisTest) TestZRevRange(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRevRange := t.client.ZRevRange("zset", "0", "-1") + c.Assert(zRevRange.Err(), IsNil) + c.Assert(zRevRange.Val(), DeepEquals, []string{"three", "two", "one"}) + + zRevRange = t.client.ZRevRange("zset", "2", "3") + c.Assert(zRevRange.Err(), IsNil) + c.Assert(zRevRange.Val(), DeepEquals, []string{"one"}) + + zRevRange = t.client.ZRevRange("zset", "-2", "-1") + c.Assert(zRevRange.Err(), IsNil) + c.Assert(zRevRange.Val(), DeepEquals, []string{"two", "one"}) +} + +func (t *RedisTest) TestZRevRangeWithScoresMap(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRevRangeWithScores("zset", "0", "-1").Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}) + + val, err = t.client.ZRevRangeWithScores("zset", "2", "3").Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}}) + + val, err = t.client.ZRevRangeWithScores("zset", "-2", "-1").Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {1, "one"}}) +} + +func (t *RedisTest) TestZRevRangeByScore(c *C) { + zadd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zadd.Err(), IsNil) + + vals, err := t.client.ZRevRangeByScore( + "zset", 
redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []string{"three", "two", "one"}) + + vals, err = t.client.ZRevRangeByScore( + "zset", redis.ZRangeByScore{Max: "2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []string{"two"}) + + vals, err = t.client.ZRevRangeByScore( + "zset", redis.ZRangeByScore{Max: "(2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []string{}) +} + +func (t *RedisTest) TestZRevRangeByScoreWithScores(c *C) { + zadd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zadd.Err(), IsNil) + + vals, err := t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}) +} + +func (t *RedisTest) TestZRevRangeByScoreWithScoresMap(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}) + + val, err = t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}}) + + val, err = t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "(2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{}) +} + +func (t *RedisTest) TestZRevRank(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, 
"one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRevRank := t.client.ZRevRank("zset", "one") + c.Assert(zRevRank.Err(), IsNil) + c.Assert(zRevRank.Val(), Equals, int64(2)) + + zRevRank = t.client.ZRevRank("zset", "four") + c.Assert(zRevRank.Err(), Equals, redis.Nil) + c.Assert(zRevRank.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestZScore(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1.001, "one"}) + c.Assert(zAdd.Err(), IsNil) + + zScore := t.client.ZScore("zset", "one") + c.Assert(zScore.Err(), IsNil) + c.Assert(zScore.Val(), Equals, float64(1.001)) +} + +func (t *RedisTest) TestZUnionStore(c *C) { + zAdd := t.client.ZAdd("zset1", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset1", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zAdd = t.client.ZAdd("zset2", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset2", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset2", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zUnionStore := t.client.ZUnionStore( + "out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2") + c.Assert(zUnionStore.Err(), IsNil) + c.Assert(zUnionStore.Val(), Equals, int64(3)) + + val, err := t.client.ZRangeWithScores("out", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{5, "one"}, {9, "three"}, {10, "two"}}) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestPatternPubSub(c *C) { + pubsub := t.client.PubSub() + defer func() { + c.Assert(pubsub.Close(), IsNil) + }() + + c.Assert(pubsub.PSubscribe("mychannel*"), IsNil) + + pub := t.client.Publish("mychannel1", "hello") + c.Assert(pub.Err(), IsNil) + c.Assert(pub.Val(), Equals, int64(1)) + + c.Assert(pubsub.PUnsubscribe("mychannel*"), IsNil) + + { + 
msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "psubscribe") + c.Assert(subscr.Channel, Equals, "mychannel*") + c.Assert(subscr.Count, Equals, 1) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.PMessage) + c.Assert(subscr.Channel, Equals, "mychannel1") + c.Assert(subscr.Pattern, Equals, "mychannel*") + c.Assert(subscr.Payload, Equals, "hello") + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "punsubscribe") + c.Assert(subscr.Channel, Equals, "mychannel*") + c.Assert(subscr.Count, Equals, 0) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err.(net.Error).Timeout(), Equals, true) + c.Assert(msgi, IsNil) + } +} + +func (t *RedisTest) TestPubSub(c *C) { + pubsub := t.client.PubSub() + defer func() { + c.Assert(pubsub.Close(), IsNil) + }() + + c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil) + + pub := t.client.Publish("mychannel", "hello") + c.Assert(pub.Err(), IsNil) + c.Assert(pub.Val(), Equals, int64(1)) + + pub = t.client.Publish("mychannel2", "hello2") + c.Assert(pub.Err(), IsNil) + c.Assert(pub.Val(), Equals, int64(1)) + + c.Assert(pubsub.Unsubscribe("mychannel", "mychannel2"), IsNil) + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "subscribe") + c.Assert(subscr.Channel, Equals, "mychannel") + c.Assert(subscr.Count, Equals, 1) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "subscribe") + c.Assert(subscr.Channel, Equals, "mychannel2") + c.Assert(subscr.Count, Equals, 2) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := 
msgi.(*redis.Message) + c.Assert(subscr.Channel, Equals, "mychannel") + c.Assert(subscr.Payload, Equals, "hello") + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + msg := msgi.(*redis.Message) + c.Assert(msg.Channel, Equals, "mychannel2") + c.Assert(msg.Payload, Equals, "hello2") + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "unsubscribe") + c.Assert(subscr.Channel, Equals, "mychannel") + c.Assert(subscr.Count, Equals, 1) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "unsubscribe") + c.Assert(subscr.Channel, Equals, "mychannel2") + c.Assert(subscr.Count, Equals, 0) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err.(net.Error).Timeout(), Equals, true) + c.Assert(msgi, IsNil) + } +} + +func (t *RedisTest) TestPubSubChannels(c *C) { + channels, err := t.client.PubSubChannels("mychannel*").Result() + c.Assert(err, IsNil) + c.Assert(channels, HasLen, 0) + c.Assert(channels, Not(IsNil)) + + pubsub := t.client.PubSub() + defer pubsub.Close() + + c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil) + + channels, err = t.client.PubSubChannels("mychannel*").Result() + c.Assert(err, IsNil) + c.Assert(sortStrings(channels), DeepEquals, []string{"mychannel", "mychannel2"}) + + channels, err = t.client.PubSubChannels("").Result() + c.Assert(err, IsNil) + c.Assert(channels, HasLen, 0) + + channels, err = t.client.PubSubChannels("*").Result() + c.Assert(err, IsNil) + c.Assert(len(channels) >= 2, Equals, true) +} + +func (t *RedisTest) TestPubSubNumSub(c *C) { + pubsub := t.client.PubSub() + defer pubsub.Close() + + c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil) + + channels, err := t.client.PubSubNumSub("mychannel", "mychannel2", "mychannel3").Result() + c.Assert(err, IsNil) + c.Assert( + 
channels, + DeepEquals, + []interface{}{"mychannel", int64(1), "mychannel2", int64(1), "mychannel3", int64(0)}, + ) +} + +func (t *RedisTest) TestPubSubNumPat(c *C) { + num, err := t.client.PubSubNumPat().Result() + c.Assert(err, IsNil) + c.Assert(num, Equals, int64(0)) + + pubsub := t.client.PubSub() + defer pubsub.Close() + + c.Assert(pubsub.PSubscribe("mychannel*"), IsNil) + + num, err = t.client.PubSubNumPat().Result() + c.Assert(err, IsNil) + c.Assert(num, Equals, int64(1)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestPipeline(c *C) { + set := t.client.Set("key2", "hello2") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + pipeline := t.client.Pipeline() + defer func() { + c.Assert(pipeline.Close(), IsNil) + }() + + set = pipeline.Set("key1", "hello1") + get := pipeline.Get("key2") + incr := pipeline.Incr("key3") + getNil := pipeline.Get("key4") + + cmds, err := pipeline.Exec() + c.Assert(err, Equals, redis.Nil) + c.Assert(cmds, HasLen, 4) + + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello2") + + c.Assert(incr.Err(), IsNil) + c.Assert(incr.Val(), Equals, int64(1)) + + c.Assert(getNil.Err(), Equals, redis.Nil) + c.Assert(getNil.Val(), Equals, "") +} + +func (t *RedisTest) TestPipelineDiscardQueued(c *C) { + pipeline := t.client.Pipeline() + + pipeline.Get("key") + pipeline.Discard() + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) + + c.Assert(pipeline.Close(), IsNil) +} + +func (t *RedisTest) TestPipelined(c *C) { + var get *redis.StringCmd + cmds, err := t.client.Pipelined(func(pipe *redis.Pipeline) error { + get = pipe.Get("foo") + return nil + }) + c.Assert(err, Equals, redis.Nil) + c.Assert(cmds, HasLen, 1) + c.Assert(cmds[0], Equals, get) + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") +} + +func (t *RedisTest) 
TestPipelineErrValNotSet(c *C) { + pipeline := t.client.Pipeline() + defer func() { + c.Assert(pipeline.Close(), IsNil) + }() + + get := pipeline.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "") +} + +func (t *RedisTest) TestPipelineRunQueuedOnEmptyQueue(c *C) { + pipeline := t.client.Pipeline() + defer func() { + c.Assert(pipeline.Close(), IsNil) + }() + + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) +} + +// TODO: make thread safe? +func (t *RedisTest) TestPipelineIncr(c *C) { + const N = 20000 + key := "TestPipelineIncr" + + pipeline := t.client.Pipeline() + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + pipeline.Incr(key) + wg.Done() + } + wg.Wait() + + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(len(cmds), Equals, 20000) + for _, cmd := range cmds { + if cmd.Err() != nil { + c.Errorf("got %v, expected nil", cmd.Err()) + } + } + + get := t.client.Get(key) + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, strconv.Itoa(N)) + + c.Assert(pipeline.Close(), IsNil) +} + +func (t *RedisTest) TestPipelineEcho(c *C) { + const N = 1000 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func(i int) { + pipeline := t.client.Pipeline() + + msg1 := "echo" + strconv.Itoa(i) + msg2 := "echo" + strconv.Itoa(i+1) + + echo1 := pipeline.Echo(msg1) + echo2 := pipeline.Echo(msg2) + + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 2) + + c.Assert(echo1.Err(), IsNil) + c.Assert(echo1.Val(), Equals, msg1) + + c.Assert(echo2.Err(), IsNil) + c.Assert(echo2.Val(), Equals, msg2) + + c.Assert(pipeline.Close(), IsNil) + + wg.Done() + }(i) + } + wg.Wait() +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestMultiExec(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + var ( + set *redis.StatusCmd + get *redis.StringCmd + ) + cmds, err 
:= multi.Exec(func() error { + set = multi.Set("key", "hello") + get = multi.Get("key") + return nil + }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 2) + + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestMultiExecDiscard(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { + multi.Set("key1", "hello1") + multi.Discard() + multi.Set("key2", "hello2") + return nil + }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + + get := t.client.Get("key1") + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") + + get = t.client.Get("key2") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello2") +} + +func (t *RedisTest) TestMultiExecEmpty(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { return nil }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) + + ping := multi.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") +} + +func (t *RedisTest) TestMultiExecOnEmptyQueue(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { return nil }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) +} + +func (t *RedisTest) TestMultiExecIncr(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { + for i := int64(0); i < 20000; i++ { + multi.Incr("key") + } + return nil + }) + c.Assert(err, IsNil) + c.Assert(len(cmds), Equals, 20000) + for _, cmd := range cmds { + if cmd.Err() != nil { + c.Errorf("got %v, expected nil", cmd.Err()) + } + } + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "20000") +} + +func (t *RedisTest) transactionalIncr(c *C) 
([]redis.Cmder, error) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + watch := multi.Watch("key") + c.Assert(watch.Err(), IsNil) + c.Assert(watch.Val(), Equals, "OK") + + get := multi.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Not(Equals), redis.Nil) + + v, err := strconv.ParseInt(get.Val(), 10, 64) + c.Assert(err, IsNil) + + return multi.Exec(func() error { + multi.Set("key", strconv.FormatInt(v+1, 10)) + return nil + }) +} + +func (t *RedisTest) TestWatchUnwatch(c *C) { + var n = 10000 + if testing.Short() { + n = 1000 + } + + set := t.client.Set("key", "0") + c.Assert(set.Err(), IsNil) + + wg := &sync.WaitGroup{} + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + cmds, err := t.transactionalIncr(c) + if err == redis.TxFailedErr { + continue + } + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + c.Assert(cmds[0].Err(), IsNil) + break + } + }() + } + wg.Wait() + + val, err := t.client.Get("key").Int64() + c.Assert(err, IsNil) + c.Assert(val, Equals, int64(n)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestRaceEcho(c *C) { + var n = 10000 + if testing.Short() { + n = 1000 + } + + wg := &sync.WaitGroup{} + wg.Add(n) + for i := 0; i < n; i++ { + go func(i int) { + msg := "echo" + strconv.Itoa(i) + echo := t.client.Echo(msg) + c.Assert(echo.Err(), IsNil) + c.Assert(echo.Val(), Equals, msg) + wg.Done() + }(i) + } + wg.Wait() +} + +func (t *RedisTest) TestRaceIncr(c *C) { + var n = 10000 + if testing.Short() { + n = 1000 + } + + wg := &sync.WaitGroup{} + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + incr := t.client.Incr("TestRaceIncr") + if err := incr.Err(); err != nil { + panic(err) + } + wg.Done() + }() + } + wg.Wait() + + val, err := t.client.Get("TestRaceIncr").Result() + c.Assert(err, IsNil) + c.Assert(val, Equals, strconv.Itoa(n)) +} + 
+//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdBgRewriteAOF(c *C) { + r := t.client.BgRewriteAOF() + c.Assert(r.Err(), IsNil) + c.Assert(r.Val(), Equals, "Background append only file rewriting started") +} + +func (t *RedisTest) TestCmdBgSave(c *C) { + // workaround for "ERR Can't BGSAVE while AOF log rewriting is in progress" + time.Sleep(time.Second) + + r := t.client.BgSave() + c.Assert(r.Err(), IsNil) + c.Assert(r.Val(), Equals, "Background saving started") +} + +func (t *RedisTest) TestCmdClientKill(c *C) { + r := t.client.ClientKill("1.1.1.1:1111") + c.Assert(r.Err(), ErrorMatches, "ERR No such client") + c.Assert(r.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdConfigGet(c *C) { + r := t.client.ConfigGet("*") + c.Assert(r.Err(), IsNil) + c.Assert(len(r.Val()) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdConfigResetStat(c *C) { + r := t.client.ConfigResetStat() + c.Assert(r.Err(), IsNil) + c.Assert(r.Val(), Equals, "OK") +} + +func (t *RedisTest) TestCmdConfigSet(c *C) { + configGet := t.client.ConfigGet("maxmemory") + c.Assert(configGet.Err(), IsNil) + c.Assert(configGet.Val(), HasLen, 2) + c.Assert(configGet.Val()[0], Equals, "maxmemory") + + configSet := t.client.ConfigSet("maxmemory", configGet.Val()[1].(string)) + c.Assert(configSet.Err(), IsNil) + c.Assert(configSet.Val(), Equals, "OK") +} + +func (t *RedisTest) TestCmdDbSize(c *C) { + dbSize := t.client.DbSize() + c.Assert(dbSize.Err(), IsNil) + c.Assert(dbSize.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestCmdFlushAll(c *C) { + // TODO +} + +func (t *RedisTest) TestCmdFlushDb(c *C) { + // TODO +} + +func (t *RedisTest) TestCmdInfo(c *C) { + info := t.client.Info() + c.Assert(info.Err(), IsNil) + c.Assert(info.Val(), Not(Equals), "") +} + +func (t *RedisTest) TestCmdLastSave(c *C) { + lastSave := t.client.LastSave() + c.Assert(lastSave.Err(), IsNil) + c.Assert(lastSave.Val(), Not(Equals), 0) +} + +func (t *RedisTest) 
TestCmdSave(c *C) { + save := t.client.Save() + c.Assert(save.Err(), IsNil) + c.Assert(save.Val(), Equals, "OK") +} + +func (t *RedisTest) TestSlaveOf(c *C) { + slaveOf := t.client.SlaveOf("localhost", "8888") + c.Assert(slaveOf.Err(), IsNil) + c.Assert(slaveOf.Val(), Equals, "OK") + + slaveOf = t.client.SlaveOf("NO", "ONE") + c.Assert(slaveOf.Err(), IsNil) + c.Assert(slaveOf.Val(), Equals, "OK") +} + +func (t *RedisTest) TestTime(c *C) { + time := t.client.Time() + c.Assert(time.Err(), IsNil) + c.Assert(time.Val(), HasLen, 2) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestScriptingEval(c *C) { + eval := t.client.Eval( + "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", + []string{"key1", "key2"}, + []string{"first", "second"}, + ) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), DeepEquals, []interface{}{"key1", "key2", "first", "second"}) + + eval = t.client.Eval( + "return redis.call('set',KEYS[1],'bar')", + []string{"foo"}, + []string{}, + ) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), Equals, "OK") + + eval = t.client.Eval("return 10", []string{}, []string{}) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), Equals, int64(10)) + + eval = t.client.Eval("return {1,2,{3,'Hello World!'}}", []string{}, []string{}) + c.Assert(eval.Err(), IsNil) + // DeepEquals can't compare nested slices. 
+ c.Assert( + fmt.Sprintf("%#v", eval.Val()), + Equals, + `[]interface {}{1, 2, []interface {}{3, "Hello World!"}}`, + ) +} + +func (t *RedisTest) TestScriptingEvalSha(c *C) { + set := t.client.Set("foo", "bar") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + eval := t.client.Eval("return redis.call('get','foo')", nil, nil) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), Equals, "bar") + + evalSha := t.client.EvalSha("6b1bf486c81ceb7edf3c093f4c48582e38c0e791", nil, nil) + c.Assert(evalSha.Err(), IsNil) + c.Assert(evalSha.Val(), Equals, "bar") + + evalSha = t.client.EvalSha("ffffffffffffffffffffffffffffffffffffffff", nil, nil) + c.Assert(evalSha.Err(), ErrorMatches, "NOSCRIPT No matching script. Please use EVAL.") + c.Assert(evalSha.Val(), Equals, nil) +} + +func (t *RedisTest) TestScriptingScriptExists(c *C) { + scriptLoad := t.client.ScriptLoad("return 1") + c.Assert(scriptLoad.Err(), IsNil) + c.Assert(scriptLoad.Val(), Equals, "e0e1f9fabfc9d4800c877a703b823ac0578ff8db") + + scriptExists := t.client.ScriptExists( + "e0e1f9fabfc9d4800c877a703b823ac0578ff8db", + "ffffffffffffffffffffffffffffffffffffffff", + ) + c.Assert(scriptExists.Err(), IsNil) + c.Assert(scriptExists.Val(), DeepEquals, []bool{true, false}) +} + +func (t *RedisTest) TestScriptingScriptFlush(c *C) { + scriptFlush := t.client.ScriptFlush() + c.Assert(scriptFlush.Err(), IsNil) + c.Assert(scriptFlush.Val(), Equals, "OK") +} + +func (t *RedisTest) TestScriptingScriptKill(c *C) { + scriptKill := t.client.ScriptKill() + c.Assert(scriptKill.Err(), ErrorMatches, ".*No scripts in execution right now.") + c.Assert(scriptKill.Val(), Equals, "") +} + +func (t *RedisTest) TestScriptingScriptLoad(c *C) { + scriptLoad := t.client.ScriptLoad("return redis.call('get','foo')") + c.Assert(scriptLoad.Err(), IsNil) + c.Assert(scriptLoad.Val(), Equals, "6b1bf486c81ceb7edf3c093f4c48582e38c0e791") +} + +func (t *RedisTest) TestScriptingNewScript(c *C) { + s := redis.NewScript("return 1") + run := 
s.Run(t.client, nil, nil) + c.Assert(run.Err(), IsNil) + c.Assert(run.Val(), Equals, int64(1)) +} + +func (t *RedisTest) TestScriptingEvalAndPipeline(c *C) { + pipeline := t.client.Pipeline() + s := redis.NewScript("return 1") + run := s.Eval(pipeline, nil, nil) + _, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(run.Err(), IsNil) + c.Assert(run.Val(), Equals, int64(1)) +} + +func (t *RedisTest) TestScriptingEvalShaAndPipeline(c *C) { + s := redis.NewScript("return 1") + c.Assert(s.Load(t.client).Err(), IsNil) + + pipeline := t.client.Pipeline() + run := s.Eval(pipeline, nil, nil) + _, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(run.Err(), IsNil) + c.Assert(run.Val(), Equals, int64(1)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdDebugObject(c *C) { + { + debug := t.client.DebugObject("foo") + c.Assert(debug.Err(), Not(IsNil)) + c.Assert(debug.Err().Error(), Equals, "ERR no such key") + } + + { + t.client.Set("foo", "bar") + debug := t.client.DebugObject("foo") + c.Assert(debug.Err(), IsNil) + c.Assert(debug.Val(), FitsTypeOf, "") + c.Assert(debug.Val(), Not(Equals), "") + } +} + +//------------------------------------------------------------------------------ + +func BenchmarkRedisPing(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Ping().Err(); err != nil { + panic(err) + } + } +} + +func BenchmarkRedisSet(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Set("key", "hello").Err(); err != nil { + panic(err) + } + } +} + +func BenchmarkRedisGetNil(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + if err := client.FlushDb().Err(); err != nil { + b.Fatal(err) + } + b.StartTimer() 
+ + for i := 0; i < b.N; i++ { + if err := client.Get("key").Err(); err != redis.Nil { + b.Fatal(err) + } + } +} + +func BenchmarkRedisGet(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + if err := client.Set("key", "hello").Err(); err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Get("key").Err(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRedisMGet(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.MGet("key1", "key2").Err(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSetExpire(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Set("key", "hello").Err(); err != nil { + b.Fatal(err) + } + if err := client.Expire("key", time.Second).Err(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkPipeline(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := client.Pipelined(func(pipe *redis.Pipeline) error { + pipe.Set("key", "hello") + pipe.Expire("key", time.Second) + return nil + }) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/script.go b/Godeps/_workspace/src/gopkg.in/redis.v2/script.go new file mode 100644 index 000000000..96c35f514 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/script.go @@ -0,0 +1,52 @@ +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +type scripter interface { + Eval(script string, keys []string, args []string) *Cmd + EvalSha(sha1 string, keys []string, args []string) *Cmd + 
ScriptExists(scripts ...string) *BoolSliceCmd + ScriptLoad(script string) *StringCmd +} + +type Script struct { + src, hash string +} + +func NewScript(src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{ + src: src, + hash: hex.EncodeToString(h.Sum(nil)), + } +} + +func (s *Script) Load(c scripter) *StringCmd { + return c.ScriptLoad(s.src) +} + +func (s *Script) Exists(c scripter) *BoolSliceCmd { + return c.ScriptExists(s.src) +} + +func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd { + return c.Eval(s.src, keys, args) +} + +func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd { + return c.EvalSha(s.hash, keys, args) +} + +func (s *Script) Run(c *Client, keys []string, args []string) *Cmd { + r := s.EvalSha(c, keys, args) + if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { + return s.Eval(c, keys, args) + } + return r +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go new file mode 100644 index 000000000..d3ffeca9a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go @@ -0,0 +1,291 @@ +package redis + +import ( + "errors" + "log" + "net" + "strings" + "sync" + "time" +) + +//------------------------------------------------------------------------------ + +type FailoverOptions struct { + MasterName string + SentinelAddrs []string + + Password string + DB int64 + + PoolSize int + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + IdleTimeout time.Duration +} + +func (opt *FailoverOptions) getPoolSize() int { + if opt.PoolSize == 0 { + return 10 + } + return opt.PoolSize +} + +func (opt *FailoverOptions) getDialTimeout() time.Duration { + if opt.DialTimeout == 0 { + return 5 * time.Second + } + return opt.DialTimeout +} + +func (opt *FailoverOptions) options() *options { + return &options{ + DB: opt.DB, + Password: opt.Password, + + DialTimeout: 
opt.getDialTimeout(), + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.getPoolSize(), + IdleTimeout: opt.IdleTimeout, + } +} + +func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + opt := failoverOpt.options() + failover := &sentinelFailover{ + masterName: failoverOpt.MasterName, + sentinelAddrs: failoverOpt.SentinelAddrs, + + opt: opt, + } + return &Client{ + baseClient: &baseClient{ + opt: opt, + connPool: failover.Pool(), + }, + } +} + +//------------------------------------------------------------------------------ + +type sentinelClient struct { + *baseClient +} + +func newSentinel(clOpt *Options) *sentinelClient { + opt := clOpt.options() + opt.Password = "" + opt.DB = 0 + dialer := func() (net.Conn, error) { + return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout) + } + return &sentinelClient{ + baseClient: &baseClient{ + opt: opt, + connPool: newConnPool(newConnFunc(dialer), opt), + }, + } +} + +func (c *sentinelClient) PubSub() *PubSub { + return &PubSub{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, false), + }, + } +} + +func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd { + cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name) + c.Process(cmd) + return cmd +} + +func (c *sentinelClient) Sentinels(name string) *SliceCmd { + cmd := NewSliceCmd("SENTINEL", "sentinels", name) + c.Process(cmd) + return cmd +} + +type sentinelFailover struct { + masterName string + sentinelAddrs []string + + opt *options + + pool pool + poolOnce sync.Once + + lock sync.RWMutex + _sentinel *sentinelClient +} + +func (d *sentinelFailover) dial() (net.Conn, error) { + addr, err := d.MasterAddr() + if err != nil { + return nil, err + } + return net.DialTimeout("tcp", addr, d.opt.DialTimeout) +} + +func (d *sentinelFailover) Pool() pool { + d.poolOnce.Do(func() { + d.pool = newConnPool(newConnFunc(d.dial), d.opt) + }) + return d.pool +} + +func (d 
*sentinelFailover) MasterAddr() (string, error) { + defer d.lock.Unlock() + d.lock.Lock() + + // Try last working sentinel. + if d._sentinel != nil { + addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) + d.resetSentinel() + } else { + addr := net.JoinHostPort(addr[0], addr[1]) + log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr) + return addr, nil + } + } + + for i, sentinelAddr := range d.sentinelAddrs { + sentinel := newSentinel(&Options{ + Addr: sentinelAddr, + + DB: d.opt.DB, + Password: d.opt.Password, + + DialTimeout: d.opt.DialTimeout, + ReadTimeout: d.opt.ReadTimeout, + WriteTimeout: d.opt.WriteTimeout, + + PoolSize: d.opt.PoolSize, + IdleTimeout: d.opt.IdleTimeout, + }) + masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) + sentinel.Close() + continue + } + + // Push working sentinel to the top. 
+ d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0] + + d.setSentinel(sentinel) + addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) + log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr) + return addr, nil + } + + return "", errors.New("redis: all sentinels are unreachable") +} + +func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) { + d.discoverSentinels(sentinel) + d._sentinel = sentinel + go d.listen() +} + +func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) { + sentinels, err := sentinel.Sentinels(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err) + return + } + for _, sentinel := range sentinels { + vals := sentinel.([]interface{}) + for i := 0; i < len(vals); i += 2 { + key := vals[i].(string) + if key == "name" { + sentinelAddr := vals[i+1].(string) + if !contains(d.sentinelAddrs, sentinelAddr) { + log.Printf( + "redis-sentinel: discovered new %q sentinel: %s", + d.masterName, sentinelAddr, + ) + d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr) + } + } + } + } +} + +func (d *sentinelFailover) listen() { + var pubsub *PubSub + for { + if pubsub == nil { + pubsub = d._sentinel.PubSub() + if err := pubsub.Subscribe("+switch-master"); err != nil { + log.Printf("redis-sentinel: Subscribe failed: %s", err) + d.lock.Lock() + d.resetSentinel() + d.lock.Unlock() + return + } + } + + msgIface, err := pubsub.Receive() + if err != nil { + log.Printf("redis-sentinel: Receive failed: %s", err) + pubsub.Close() + return + } + + switch msg := msgIface.(type) { + case *Message: + switch msg.Channel { + case "+switch-master": + parts := strings.Split(msg.Payload, " ") + if parts[0] != d.masterName { + log.Printf("redis-sentinel: ignore new %s addr", parts[0]) + continue + } + addr := net.JoinHostPort(parts[3], parts[4]) + log.Printf( + "redis-sentinel: new %q addr is %s", + d.masterName, addr, + ) + d.pool.Filter(func(cn *conn) 
bool { + if cn.RemoteAddr().String() != addr { + log.Printf( + "redis-sentinel: closing connection to old master %s", + cn.RemoteAddr(), + ) + return false + } + return true + }) + default: + log.Printf("redis-sentinel: unsupported message: %s", msg) + } + case *Subscription: + // Ignore. + default: + log.Printf("redis-sentinel: unsupported message: %s", msgIface) + } + } +} + +func (d *sentinelFailover) resetSentinel() { + d._sentinel.Close() + d._sentinel = nil +} + +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go new file mode 100644 index 000000000..ede59bd51 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go @@ -0,0 +1,185 @@ +package redis_test + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + "text/template" + "time" + + "gopkg.in/redis.v2" +) + +func startRedis(port string) (*exec.Cmd, error) { + cmd := exec.Command("redis-server", "--port", port) + if false { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + if err := cmd.Start(); err != nil { + return nil, err + } + return cmd, nil +} + +func startRedisSlave(port, slave string) (*exec.Cmd, error) { + cmd := exec.Command("redis-server", "--port", port, "--slaveof", "127.0.0.1", slave) + if false { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + if err := cmd.Start(); err != nil { + return nil, err + } + return cmd, nil +} + +func startRedisSentinel(port, masterName, masterPort string) (*exec.Cmd, error) { + dir, err := ioutil.TempDir("", "sentinel") + if err != nil { + return nil, err + } + + sentinelConfFilepath := filepath.Join(dir, "sentinel.conf") + tpl, err := template.New("sentinel.conf").Parse(sentinelConf) + if err != nil { + return nil, err + } + + data := struct { + Port string + MasterName string + MasterPort string + 
}{ + Port: port, + MasterName: masterName, + MasterPort: masterPort, + } + if err := writeTemplateToFile(sentinelConfFilepath, tpl, data); err != nil { + return nil, err + } + + cmd := exec.Command("redis-server", sentinelConfFilepath, "--sentinel") + if true { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + if err := cmd.Start(); err != nil { + return nil, err + } + + return cmd, nil +} + +func writeTemplateToFile(path string, t *template.Template, data interface{}) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + return t.Execute(f, data) +} + +func TestSentinel(t *testing.T) { + masterName := "mymaster" + masterPort := "8123" + slavePort := "8124" + sentinelPort := "8125" + + masterCmd, err := startRedis(masterPort) + if err != nil { + t.Fatal(err) + } + defer masterCmd.Process.Kill() + + // Wait for master to start. + time.Sleep(200 * time.Millisecond) + + master := redis.NewTCPClient(&redis.Options{ + Addr: ":" + masterPort, + }) + if err := master.Ping().Err(); err != nil { + t.Fatal(err) + } + + slaveCmd, err := startRedisSlave(slavePort, masterPort) + if err != nil { + t.Fatal(err) + } + defer slaveCmd.Process.Kill() + + // Wait for slave to start. + time.Sleep(200 * time.Millisecond) + + slave := redis.NewTCPClient(&redis.Options{ + Addr: ":" + slavePort, + }) + if err := slave.Ping().Err(); err != nil { + t.Fatal(err) + } + + sentinelCmd, err := startRedisSentinel(sentinelPort, masterName, masterPort) + if err != nil { + t.Fatal(err) + } + defer sentinelCmd.Process.Kill() + + // Wait for sentinel to start. 
+ time.Sleep(200 * time.Millisecond) + + sentinel := redis.NewTCPClient(&redis.Options{ + Addr: ":" + sentinelPort, + }) + if err := sentinel.Ping().Err(); err != nil { + t.Fatal(err) + } + defer sentinel.Shutdown() + + client := redis.NewFailoverClient(&redis.FailoverOptions{ + MasterName: masterName, + SentinelAddrs: []string{":" + sentinelPort}, + }) + + if err := client.Set("foo", "master").Err(); err != nil { + t.Fatal(err) + } + + val, err := master.Get("foo").Result() + if err != nil { + t.Fatal(err) + } + if val != "master" { + t.Fatalf(`got %q, expected "master"`, val) + } + + // Kill Redis master. + if err := masterCmd.Process.Kill(); err != nil { + t.Fatal(err) + } + if err := master.Ping().Err(); err == nil { + t.Fatalf("master was not killed") + } + + // Wait for Redis sentinel to elect new master. + time.Sleep(5 * time.Second) + + // Check that client picked up new master. + val, err = client.Get("foo").Result() + if err != nil { + t.Fatal(err) + } + if val != "master" { + t.Fatalf(`got %q, expected "master"`, val) + } +} + +var sentinelConf = ` +port {{ .Port }} + +sentinel monitor {{ .MasterName }} 127.0.0.1 {{ .MasterPort }} 1 +sentinel down-after-milliseconds {{ .MasterName }} 1000 +sentinel failover-timeout {{ .MasterName }} 2000 +sentinel parallel-syncs {{ .MasterName }} 1 +` diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf b/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf new file mode 100644 index 000000000..3da90b380 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf @@ -0,0 +1,6 @@ +port 26379 + +sentinel monitor master 127.0.0.1 6379 1 +sentinel down-after-milliseconds master 2000 +sentinel failover-timeout master 5000 +sentinel parallel-syncs master 4 -- cgit v1.2.3-1-g7c22