author      Christopher Speller <crspeller@gmail.com>  2018-01-29 14:17:40 -0800
committer   GitHub <noreply@github.com>  2018-01-29 14:17:40 -0800
commit      961c04cae992eadb42d286d2f85f8a675bdc68c8 (patch)
tree        3408f2d06f847e966c53485e2d54c692cdd037c1 /vendor/google.golang.org
parent      8d66523ba7d9a77129844be476732ebfd5272d64 (diff)
download    chat-961c04cae992eadb42d286d2f85f8a675bdc68c8.tar.gz
            chat-961c04cae992eadb42d286d2f85f8a675bdc68c8.tar.bz2
            chat-961c04cae992eadb42d286d2f85f8a675bdc68c8.zip
Upgrading server dependencies (#8154)
Diffstat (limited to 'vendor/google.golang.org')
-rw-r--r--  vendor/google.golang.org/appengine/.travis.yml | 24
-rw-r--r--  vendor/google.golang.org/appengine/CONTRIBUTING.md | 90
-rw-r--r--  vendor/google.golang.org/appengine/LICENSE | 202
-rw-r--r--  vendor/google.golang.org/appengine/README.md | 73
-rw-r--r--  vendor/google.golang.org/appengine/aetest/doc.go | 42
-rw-r--r--  vendor/google.golang.org/appengine/aetest/instance.go | 55
-rw-r--r--  vendor/google.golang.org/appengine/aetest/instance_classic.go | 21
-rw-r--r--  vendor/google.golang.org/appengine/aetest/instance_test.go | 119
-rw-r--r--  vendor/google.golang.org/appengine/aetest/instance_vm.go | 282
-rw-r--r--  vendor/google.golang.org/appengine/aetest/user.go | 36
-rw-r--r--  vendor/google.golang.org/appengine/appengine.go | 113
-rw-r--r--  vendor/google.golang.org/appengine/appengine_test.go | 49
-rw-r--r--  vendor/google.golang.org/appengine/appengine_vm.go | 20
-rw-r--r--  vendor/google.golang.org/appengine/blobstore/blobstore.go | 276
-rw-r--r--  vendor/google.golang.org/appengine/blobstore/blobstore_test.go | 183
-rw-r--r--  vendor/google.golang.org/appengine/blobstore/read.go | 160
-rw-r--r--  vendor/google.golang.org/appengine/capability/capability.go | 52
-rw-r--r--  vendor/google.golang.org/appengine/channel/channel.go | 87
-rw-r--r--  vendor/google.golang.org/appengine/channel/channel_test.go | 21
-rw-r--r--  vendor/google.golang.org/appengine/cloudsql/cloudsql.go | 62
-rw-r--r--  vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go | 17
-rw-r--r--  vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go | 16
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go | 342
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go | 72
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aefix/ae.go | 185
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aefix/ae_test.go | 144
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aefix/fix.go | 848
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aefix/main.go | 258
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aefix/main_test.go | 129
-rw-r--r--  vendor/google.golang.org/appengine/cmd/aefix/typecheck.go | 673
-rw-r--r--  vendor/google.golang.org/appengine/datastore/datastore.go | 407
-rw-r--r--  vendor/google.golang.org/appengine/datastore/datastore_test.go | 1744
-rw-r--r--  vendor/google.golang.org/appengine/datastore/doc.go | 361
-rw-r--r--  vendor/google.golang.org/appengine/datastore/key.go | 309
-rw-r--r--  vendor/google.golang.org/appengine/datastore/key_test.go | 204
-rw-r--r--  vendor/google.golang.org/appengine/datastore/load.go | 429
-rw-r--r--  vendor/google.golang.org/appengine/datastore/load_test.go | 656
-rw-r--r--  vendor/google.golang.org/appengine/datastore/metadata.go | 78
-rw-r--r--  vendor/google.golang.org/appengine/datastore/prop.go | 330
-rw-r--r--  vendor/google.golang.org/appengine/datastore/prop_test.go | 547
-rw-r--r--  vendor/google.golang.org/appengine/datastore/query.go | 724
-rw-r--r--  vendor/google.golang.org/appengine/datastore/query_test.go | 583
-rw-r--r--  vendor/google.golang.org/appengine/datastore/save.go | 327
-rw-r--r--  vendor/google.golang.org/appengine/datastore/time_test.go | 65
-rw-r--r--  vendor/google.golang.org/appengine/datastore/transaction.go | 87
-rw-r--r--  vendor/google.golang.org/appengine/delay/delay.go | 295
-rw-r--r--  vendor/google.golang.org/appengine/delay/delay_go17.go | 23
-rw-r--r--  vendor/google.golang.org/appengine/delay/delay_go17_test.go | 55
-rw-r--r--  vendor/google.golang.org/appengine/delay/delay_pre17.go | 19
-rw-r--r--  vendor/google.golang.org/appengine/delay/delay_test.go | 428
-rw-r--r--  vendor/google.golang.org/appengine/demos/guestbook/app.yaml | 14
-rw-r--r--  vendor/google.golang.org/appengine/demos/guestbook/favicon.ico | Bin 0 -> 1150 bytes
-rw-r--r--  vendor/google.golang.org/appengine/demos/guestbook/guestbook.go | 109
-rw-r--r--  vendor/google.golang.org/appengine/demos/guestbook/index.yaml | 7
-rw-r--r--  vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html | 26
-rw-r--r--  vendor/google.golang.org/appengine/demos/helloworld/app.yaml | 10
-rw-r--r--  vendor/google.golang.org/appengine/demos/helloworld/favicon.ico | Bin 0 -> 1150 bytes
-rw-r--r--  vendor/google.golang.org/appengine/demos/helloworld/helloworld.go | 50
-rw-r--r--  vendor/google.golang.org/appengine/errors.go | 46
-rw-r--r--  vendor/google.golang.org/appengine/file/file.go | 28
-rw-r--r--  vendor/google.golang.org/appengine/identity.go | 142
-rw-r--r--  vendor/google.golang.org/appengine/image/image.go | 67
-rw-r--r--  vendor/google.golang.org/appengine/internal/aetesting/fake.go | 81
-rw-r--r--  vendor/google.golang.org/appengine/internal/api.go | 660
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_classic.go | 169
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_common.go | 123
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_pre17.go | 682
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_race_test.go | 9
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_test.go | 466
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_id.go | 28
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_id_test.go | 34
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go | 296
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto | 64
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.pb.go | 133
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.proto | 33
-rw-r--r--  vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go | 347
-rw-r--r--  vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto | 71
-rw-r--r--  vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go | 125
-rw-r--r--  vendor/google.golang.org/appengine/internal/capability/capability_service.proto | 28
-rw-r--r--  vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go | 154
-rw-r--r--  vendor/google.golang.org/appengine/internal/channel/channel_service.proto | 30
-rw-r--r--  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go | 2778
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto | 541
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity.go | 14
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_classic.go | 57
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_vm.go | 101
-rw-r--r--  vendor/google.golang.org/appengine/internal/image/images_service.pb.go | 845
-rw-r--r--  vendor/google.golang.org/appengine/internal/image/images_service.proto | 162
-rw-r--r--  vendor/google.golang.org/appengine/internal/internal.go | 110
-rw-r--r--  vendor/google.golang.org/appengine/internal/internal_vm_test.go | 60
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.pb.go | 899
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.proto | 150
-rw-r--r--  vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go | 229
-rw-r--r--  vendor/google.golang.org/appengine/internal/mail/mail_service.proto | 45
-rw-r--r--  vendor/google.golang.org/appengine/internal/main.go | 15
-rw-r--r--  vendor/google.golang.org/appengine/internal/main_vm.go | 48
-rw-r--r--  vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go | 938
-rw-r--r--  vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto | 165
-rw-r--r--  vendor/google.golang.org/appengine/internal/metadata.go | 61
-rw-r--r--  vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go | 375
-rw-r--r--  vendor/google.golang.org/appengine/internal/modules/modules_service.proto | 80
-rw-r--r--  vendor/google.golang.org/appengine/internal/net.go | 56
-rw-r--r--  vendor/google.golang.org/appengine/internal/net_test.go | 58
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/regen.sh | 40
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go | 231
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto | 44
-rw-r--r--  vendor/google.golang.org/appengine/internal/search/search.pb.go | 2488
-rw-r--r--  vendor/google.golang.org/appengine/internal/search/search.proto | 394
-rw-r--r--  vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go | 1858
-rw-r--r--  vendor/google.golang.org/appengine/internal/socket/socket_service.proto | 460
-rw-r--r--  vendor/google.golang.org/appengine/internal/system/system_service.pb.go | 198
-rw-r--r--  vendor/google.golang.org/appengine/internal/system/system_service.proto | 49
-rw-r--r--  vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go | 1888
-rw-r--r--  vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto | 342
-rw-r--r--  vendor/google.golang.org/appengine/internal/transaction.go | 107
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go | 355
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto | 64
-rw-r--r--  vendor/google.golang.org/appengine/internal/user/user_service.pb.go | 289
-rw-r--r--  vendor/google.golang.org/appengine/internal/user/user_service.proto | 58
-rw-r--r--  vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go | 427
-rw-r--r--  vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto | 83
-rw-r--r--  vendor/google.golang.org/appengine/log/api.go | 40
-rw-r--r--  vendor/google.golang.org/appengine/log/log.go | 323
-rw-r--r--  vendor/google.golang.org/appengine/log/log_test.go | 112
-rw-r--r--  vendor/google.golang.org/appengine/mail/mail.go | 123
-rw-r--r--  vendor/google.golang.org/appengine/mail/mail_test.go | 65
-rw-r--r--  vendor/google.golang.org/appengine/memcache/memcache.go | 526
-rw-r--r--  vendor/google.golang.org/appengine/memcache/memcache_test.go | 263
-rw-r--r--  vendor/google.golang.org/appengine/module/module.go | 113
-rw-r--r--  vendor/google.golang.org/appengine/module/module_test.go | 124
-rw-r--r--  vendor/google.golang.org/appengine/namespace.go | 25
-rw-r--r--  vendor/google.golang.org/appengine/namespace_test.go | 39
-rw-r--r--  vendor/google.golang.org/appengine/remote_api/client.go | 194
-rw-r--r--  vendor/google.golang.org/appengine/remote_api/client_test.go | 43
-rw-r--r--  vendor/google.golang.org/appengine/remote_api/remote_api.go | 152
-rw-r--r--  vendor/google.golang.org/appengine/runtime/runtime.go | 148
-rw-r--r--  vendor/google.golang.org/appengine/runtime/runtime_test.go | 101
-rw-r--r--  vendor/google.golang.org/appengine/search/doc.go | 209
-rw-r--r--  vendor/google.golang.org/appengine/search/field.go | 82
-rw-r--r--  vendor/google.golang.org/appengine/search/search.go | 1189
-rw-r--r--  vendor/google.golang.org/appengine/search/search_test.go | 1270
-rw-r--r--  vendor/google.golang.org/appengine/search/struct.go | 251
-rw-r--r--  vendor/google.golang.org/appengine/search/struct_test.go | 213
-rw-r--r--  vendor/google.golang.org/appengine/socket/doc.go | 10
-rw-r--r--  vendor/google.golang.org/appengine/socket/socket_classic.go | 290
-rw-r--r--  vendor/google.golang.org/appengine/socket/socket_vm.go | 64
-rw-r--r--  vendor/google.golang.org/appengine/taskqueue/taskqueue.go | 541
-rw-r--r--  vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go | 173
-rw-r--r--  vendor/google.golang.org/appengine/timeout.go | 20
-rw-r--r--  vendor/google.golang.org/appengine/urlfetch/urlfetch.go | 210
-rw-r--r--  vendor/google.golang.org/appengine/user/oauth.go | 52
-rw-r--r--  vendor/google.golang.org/appengine/user/user.go | 84
-rw-r--r--  vendor/google.golang.org/appengine/user/user_classic.go | 44
-rw-r--r--  vendor/google.golang.org/appengine/user/user_test.go | 99
-rw-r--r--  vendor/google.golang.org/appengine/user/user_vm.go | 38
-rw-r--r--  vendor/google.golang.org/appengine/xmpp/xmpp.go | 253
-rw-r--r--  vendor/google.golang.org/appengine/xmpp/xmpp_test.go | 173
157 files changed, 41172 insertions, 0 deletions
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 000000000..7ef8b6c7f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+
+go_import_path: google.golang.org/appengine
+
+install:
+ - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine)
+ - mkdir /tmp/sdk
+ - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip"
+ - unzip -q /tmp/sdk.zip -d /tmp/sdk
+ - export PATH="$PATH:/tmp/sdk/go_appengine"
+ - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
+
+script:
+ - goapp version
+ - go version
+ - go test -v google.golang.org/appengine/...
+ - go test -v -race google.golang.org/appengine/...
+ - goapp test -v google.golang.org/appengine/...
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
new file mode 100644
index 000000000..ffc298520
--- /dev/null
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -0,0 +1,90 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. Get the package:
+
+ `go get -d google.golang.org/appengine`
+1. Change into the checked out source:
+
+ `cd $GOPATH/src/google.golang.org/appengine`
+1. Fork the repo.
+1. Set your fork as a remote:
+
+ `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
+1. Make changes, commit to your fork.
+1. Send a pull request with your changes.
+ The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
+
+# Testing
+
+## Running system tests
+
+Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
+
+Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
+
+Run tests with `goapp test`:
+
+```
+goapp test -v google.golang.org/appengine/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 000000000..d86768a2c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on *App Engine standard*.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [GitHub's issue
+tracker](https://github.com/golang/appengine/issues).
+
+## Upgrading an App Engine app to the flexible environment
+
+This package does not work on *App Engine flexible*.
+
+There are many differences between the App Engine standard environment and
+the flexible environment.
+
+See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
+
+## Directory structure
+
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating from legacy (`import "appengine"`) packages
+
+If you're currently using the bare `appengine` packages
+(that is, not these ones, imported via `google.golang.org/appengine`),
+then you can use the `aefix` tool to help automate an upgrade to these packages.
+
+Run `go get google.golang.org/appengine/cmd/aefix` to install it.
+
+### 1. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+
+### 2. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and there are some differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+ `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+ deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+ Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+ Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+ feature you require is not present in the new
+ [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+ Use the standard `net` package instead.
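
The migration list above is easier to follow with a concrete handler. The sketch below is an illustrative example, not part of the vendored README; the `/fetch` route and the fetched URL are arbitrary. It shows the fully qualified import paths, logging via the `log` package instead of methods on the old `appengine.Context`, and `context.WithTimeout` in place of the removed `appengine.Timeout`.

```go
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
	"google.golang.org/appengine/urlfetch"
)

func init() {
	http.HandleFunc("/fetch", handle)
}

func handle(w http.ResponseWriter, r *http.Request) {
	// NewContext now returns a golang.org/x/net/context Context; logging moved
	// from methods on the old appengine.Context to the appengine/log package.
	ctx := appengine.NewContext(r)
	log.Infof(ctx, "handling %s", r.URL.Path)

	// appengine.Timeout is gone; deadlines are set on the Context itself.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// urlfetch.Transport no longer has a Deadline field; the context deadline applies.
	resp, err := urlfetch.Client(ctx).Get("https://example.com/")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	w.Write([]byte(resp.Status))
}

func main() { appengine.Main() }
```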
diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go
new file mode 100644
index 000000000..86ce8c2c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/doc.go
@@ -0,0 +1,42 @@
+/*
+Package aetest provides an API for running dev_appserver for use in tests.
+
+An example test file:
+
+ package foo_test
+
+ import (
+ "testing"
+
+ "google.golang.org/appengine/memcache"
+ "google.golang.org/appengine/aetest"
+ )
+
+ func TestFoo(t *testing.T) {
+ ctx, done, err := aetest.NewContext()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer done()
+
+ it := &memcache.Item{
+ Key: "some-key",
+ Value: []byte("some-value"),
+ }
+ err = memcache.Set(ctx, it)
+ if err != nil {
+ t.Fatalf("Set err: %v", err)
+ }
+ it, err = memcache.Get(ctx, "some-key")
+ if err != nil {
+ t.Fatalf("Get err: %v; want no error", err)
+ }
+ if g, w := string(it.Value), "some-value"; g != w {
+ t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+ }
+ }
+
+The environment variable APPENGINE_DEV_APPSERVER specifies the location of the
+dev_appserver.py executable to use. If unset, the system PATH is consulted.
+*/
+package aetest
diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go
new file mode 100644
index 000000000..77323f751
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance.go
@@ -0,0 +1,55 @@
+package aetest
+
+import (
+ "io"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+)
+
+// Instance represents a running instance of the development API Server.
+type Instance interface {
+ // Close kills the child api_server.py process, releasing its resources.
+ io.Closer
+ // NewRequest returns an *http.Request associated with this instance.
+ NewRequest(method, urlStr string, body io.Reader) (*http.Request, error)
+}
+
+// Options is used to specify options when creating an Instance.
+type Options struct {
+ // AppID specifies the App ID to use during tests.
+ // By default, "testapp".
+ AppID string
+ // StronglyConsistentDatastore is whether the local datastore should be
+ // strongly consistent. This will diverge from production behaviour.
+ StronglyConsistentDatastore bool
+ // StartupTimeout is a duration to wait for instance startup.
+ // By default, 15 seconds.
+ StartupTimeout time.Duration
+}
+
+// NewContext starts an instance of the development API server, and returns
+// a context that will route all API calls to that server, as well as a
+// closure that must be called when the Context is no longer required.
+func NewContext() (context.Context, func(), error) {
+ inst, err := NewInstance(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := inst.NewRequest("GET", "/", nil)
+ if err != nil {
+ inst.Close()
+ return nil, nil, err
+ }
+ ctx := appengine.NewContext(req)
+ return ctx, func() {
+ inst.Close()
+ }, nil
+}
+
+// PrepareDevAppserver is a hook which, if set, will be called before the
+// dev_appserver.py is started, each time it is started. If aetest.NewContext
+// is invoked from the goapp test tool, this hook is unnecessary.
+var PrepareDevAppserver func() error
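
For reference, the `Options` and `Instance` types above are typically driven from a test. The sketch below assumes the dev_appserver SDK is available (for example via `APPENGINE_DEV_APPSERVER`, as `aetest/doc.go` notes); the `Greeting` kind is purely illustrative.

```go
package myapp_test

import (
	"testing"
	"time"

	"google.golang.org/appengine"
	"google.golang.org/appengine/aetest"
	"google.golang.org/appengine/datastore"
)

func TestWithInstance(t *testing.T) {
	// Strong consistency avoids eventual-consistency flakiness in tests,
	// at the cost of diverging from production behaviour.
	inst, err := aetest.NewInstance(&aetest.Options{
		StronglyConsistentDatastore: true,
		StartupTimeout:              30 * time.Second,
	})
	if err != nil {
		t.Fatalf("aetest.NewInstance: %v", err)
	}
	defer inst.Close()

	// Requests created from the instance route their API calls to the test server.
	req, err := inst.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}
	ctx := appengine.NewContext(req)

	type Greeting struct{ Message string }
	key := datastore.NewIncompleteKey(ctx, "Greeting", nil)
	if _, err := datastore.Put(ctx, key, &Greeting{Message: "hello"}); err != nil {
		t.Fatalf("datastore.Put: %v", err)
	}
}
```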
diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go
new file mode 100644
index 000000000..fbceaa505
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_classic.go
@@ -0,0 +1,21 @@
+// +build appengine
+
+package aetest
+
+import "appengine/aetest"
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+ aetest.PrepareDevAppserver = PrepareDevAppserver
+ var aeOpts *aetest.Options
+ if opts != nil {
+ aeOpts = &aetest.Options{
+ AppID: opts.AppID,
+ StronglyConsistentDatastore: opts.StronglyConsistentDatastore,
+ }
+ }
+ return aetest.NewInstance(aeOpts)
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_test.go b/vendor/google.golang.org/appengine/aetest/instance_test.go
new file mode 100644
index 000000000..e7003afd9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_test.go
@@ -0,0 +1,119 @@
+package aetest
+
+import (
+ "os"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/memcache"
+ "google.golang.org/appengine/user"
+)
+
+func TestBasicAPICalls(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+ resetEnv := internal.SetTestEnv()
+ defer resetEnv()
+
+ inst, err := NewInstance(nil)
+ if err != nil {
+ t.Fatalf("NewInstance: %v", err)
+ }
+ defer inst.Close()
+
+ req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+ if err != nil {
+ t.Fatalf("NewRequest: %v", err)
+ }
+ ctx := appengine.NewContext(req)
+
+ it := &memcache.Item{
+ Key: "some-key",
+ Value: []byte("some-value"),
+ }
+ err = memcache.Set(ctx, it)
+ if err != nil {
+ t.Fatalf("Set err: %v", err)
+ }
+ it, err = memcache.Get(ctx, "some-key")
+ if err != nil {
+ t.Fatalf("Get err: %v; want no error", err)
+ }
+ if g, w := string(it.Value), "some-value"; g != w {
+ t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+ }
+
+ type Entity struct{ Value string }
+ e := &Entity{Value: "foo"}
+ k := datastore.NewIncompleteKey(ctx, "Entity", nil)
+ k, err = datastore.Put(ctx, k, e)
+ if err != nil {
+ t.Fatalf("datastore.Put: %v", err)
+ }
+ e = new(Entity)
+ if err := datastore.Get(ctx, k, e); err != nil {
+ t.Fatalf("datastore.Get: %v", err)
+ }
+ if g, w := e.Value, "foo"; g != w {
+ t.Errorf("retrieved Entity.Value = %q, want %q", g, w)
+ }
+}
+
+func TestContext(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ // Check that the context methods work.
+ _, done, err := NewContext()
+ if err != nil {
+ t.Fatalf("NewContext: %v", err)
+ }
+ done()
+}
+
+func TestUsers(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ inst, err := NewInstance(nil)
+ if err != nil {
+ t.Fatalf("NewInstance: %v", err)
+ }
+ defer inst.Close()
+
+ req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+ if err != nil {
+ t.Fatalf("NewRequest: %v", err)
+ }
+ ctx := appengine.NewContext(req)
+
+ if user := user.Current(ctx); user != nil {
+ t.Errorf("user.Current initially %v, want nil", user)
+ }
+
+ u := &user.User{
+ Email: "gopher@example.com",
+ Admin: true,
+ }
+ Login(u, req)
+
+ if got := user.Current(ctx); got.Email != u.Email {
+ t.Errorf("user.Current: %v, want %v", got, u)
+ }
+ if admin := user.IsAdmin(ctx); !admin {
+ t.Errorf("user.IsAdmin: %t, want true", admin)
+ }
+
+ Logout(req)
+ if user := user.Current(ctx); user != nil {
+ t.Errorf("user.Current after logout %v, want nil", user)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go
new file mode 100644
index 000000000..829979000
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go
@@ -0,0 +1,282 @@
+// +build !appengine
+
+package aetest
+
+import (
+ "bufio"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+)
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+ i := &instance{
+ opts: opts,
+ appID: "testapp",
+ startupTimeout: 15 * time.Second,
+ }
+ if opts != nil {
+ if opts.AppID != "" {
+ i.appID = opts.AppID
+ }
+ if opts.StartupTimeout > 0 {
+ i.startupTimeout = opts.StartupTimeout
+ }
+ }
+ if err := i.startChild(); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func newSessionID() string {
+ var buf [16]byte
+ io.ReadFull(rand.Reader, buf[:])
+ return fmt.Sprintf("%x", buf[:])
+}
+
+// instance implements the Instance interface.
+type instance struct {
+ opts *Options
+ child *exec.Cmd
+ apiURL *url.URL // base URL of API HTTP server
+ adminURL string // base URL of admin HTTP server
+ appDir string
+ appID string
+ startupTimeout time.Duration
+ relFuncs []func() // funcs to release any associated contexts
+}
+
+// NewRequest returns an *http.Request associated with this instance.
+func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Associate this request.
+ req, release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {
+ ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID)
+ return ctx
+ })
+ i.relFuncs = append(i.relFuncs, release)
+
+ return req, nil
+}
+
+// Close kills the child api_server.py process, releasing its resources.
+func (i *instance) Close() (err error) {
+ for _, rel := range i.relFuncs {
+ rel()
+ }
+ i.relFuncs = nil
+ child := i.child
+ if child == nil {
+ return nil
+ }
+ defer func() {
+ i.child = nil
+ err1 := os.RemoveAll(i.appDir)
+ if err == nil {
+ err = err1
+ }
+ }()
+
+ if p := child.Process; p != nil {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- child.Wait()
+ }()
+
+ // Call the quit handler on the admin server.
+ res, err := http.Get(i.adminURL + "/quit")
+ if err != nil {
+ p.Kill()
+ return fmt.Errorf("unable to call /quit handler: %v", err)
+ }
+ res.Body.Close()
+ select {
+ case <-time.After(15 * time.Second):
+ p.Kill()
+ return errors.New("timeout killing child process")
+ case err = <-errc:
+ // Do nothing.
+ }
+ }
+ return
+}
+
+func fileExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+func findPython() (path string, err error) {
+ for _, name := range []string{"python2.7", "python"} {
+ path, err = exec.LookPath(name)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+func findDevAppserver() (string, error) {
+ if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" {
+ if fileExists(p) {
+ return p, nil
+ }
+ return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p)
+ }
+ return exec.LookPath("dev_appserver.py")
+}
+
+var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`)
+var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`)
+
+func (i *instance) startChild() (err error) {
+ if PrepareDevAppserver != nil {
+ if err := PrepareDevAppserver(); err != nil {
+ return err
+ }
+ }
+ python, err := findPython()
+ if err != nil {
+ return fmt.Errorf("Could not find python interpreter: %v", err)
+ }
+ devAppserver, err := findDevAppserver()
+ if err != nil {
+ return fmt.Errorf("Could not find dev_appserver.py: %v", err)
+ }
+
+ i.appDir, err = ioutil.TempDir("", "appengine-aetest")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(i.appDir)
+ }
+ }()
+ err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644)
+ if err != nil {
+ return err
+ }
+
+ appserverArgs := []string{
+ devAppserver,
+ "--port=0",
+ "--api_port=0",
+ "--admin_port=0",
+ "--automatic_restart=false",
+ "--skip_sdk_update_check=true",
+ "--clear_datastore=true",
+ "--clear_search_indexes=true",
+ "--datastore_path", filepath.Join(i.appDir, "datastore"),
+ }
+ if i.opts != nil && i.opts.StronglyConsistentDatastore {
+ appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent")
+ }
+ appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app"))
+
+ i.child = exec.Command(python,
+ appserverArgs...,
+ )
+ i.child.Stdout = os.Stdout
+ var stderr io.Reader
+ stderr, err = i.child.StderrPipe()
+ if err != nil {
+ return err
+ }
+ stderr = io.TeeReader(stderr, os.Stderr)
+ if err = i.child.Start(); err != nil {
+ return err
+ }
+
+ // Read stderr until we have read the URLs of the API server and admin interface.
+ errc := make(chan error, 1)
+ go func() {
+ s := bufio.NewScanner(stderr)
+ for s.Scan() {
+ if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+ u, err := url.Parse(match[1])
+ if err != nil {
+ errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err)
+ return
+ }
+ i.apiURL = u
+ }
+ if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+ i.adminURL = match[1]
+ }
+ if i.adminURL != "" && i.apiURL != nil {
+ break
+ }
+ }
+ errc <- s.Err()
+ }()
+
+ select {
+ case <-time.After(i.startupTimeout):
+ if p := i.child.Process; p != nil {
+ p.Kill()
+ }
+ return errors.New("timeout starting child process")
+ case err := <-errc:
+ if err != nil {
+ return fmt.Errorf("error reading child process stderr: %v", err)
+ }
+ }
+ if i.adminURL == "" {
+ return errors.New("unable to find admin server URL")
+ }
+ if i.apiURL == nil {
+ return errors.New("unable to find API server URL")
+ }
+ return nil
+}
+
+func (i *instance) appYAML() string {
+ return fmt.Sprintf(appYAMLTemplate, i.appID)
+}
+
+const appYAMLTemplate = `
+application: %s
+version: 1
+runtime: go
+api_version: go1
+
+handlers:
+- url: /.*
+ script: _go_app
+`
+
+const appSource = `
+package main
+import "google.golang.org/appengine"
+func main() { appengine.Main() }
+`
diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go
new file mode 100644
index 000000000..bf9266f53
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/user.go
@@ -0,0 +1,36 @@
+package aetest
+
+import (
+ "hash/crc32"
+ "net/http"
+ "strconv"
+
+ "google.golang.org/appengine/user"
+)
+
+// Login causes the provided Request to act as though issued by the given user.
+func Login(u *user.User, req *http.Request) {
+ req.Header.Set("X-AppEngine-User-Email", u.Email)
+ id := u.ID
+ if id == "" {
+ id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable)))
+ }
+ req.Header.Set("X-AppEngine-User-Id", id)
+ req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email)
+ req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider)
+ if u.Admin {
+ req.Header.Set("X-AppEngine-User-Is-Admin", "1")
+ } else {
+ req.Header.Set("X-AppEngine-User-Is-Admin", "0")
+ }
+}
+
+// Logout causes the provided Request to act as though issued by a logged-out
+// user.
+func Logout(req *http.Request) {
+ req.Header.Del("X-AppEngine-User-Email")
+ req.Header.Del("X-AppEngine-User-Id")
+ req.Header.Del("X-AppEngine-User-Is-Admin")
+ req.Header.Del("X-AppEngine-User-Federated-Identity")
+ req.Header.Del("X-AppEngine-User-Federated-Provider")
+}
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 000000000..76dedc81d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,113 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine.
+//
+// On App Engine Flexible it installs a trivial health checker if one isn't
+// already registered, and starts listening on port 8080 (overridden by the
+// $PORT environment variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// On App Engine Standard it ensures the server has started and is prepared to
+// receive requests.
+//
+// Main never returns.
+//
+// Main is designed so that the app's main package looks like this:
+//
+// package main
+//
+// import (
+// "google.golang.org/appengine"
+//
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
+//
+// func main() {
+// appengine.Main()
+// }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+ internal.Main()
+}
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+ return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+ return internal.ReqContext(req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+ return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+// APICallFunc defines a function type for handling an API call.
+// See WithCallOverride.
+type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+
+// WithAPICallFunc returns a copy of the parent context
+// that will cause API calls to invoke f instead of their normal operation.
+//
+// This is intended for advanced users only.
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
+ return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
+}
+
+// APICall performs an API call.
+//
+// This is not intended for general use; it is exported for use in conjunction
+// with WithAPICallFunc.
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
+ return internal.Call(ctx, service, method, in, out)
+}
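
`WithAPICallFunc` and `APICall` above exist mainly for interception. One plausible use, sketched here (not taken from the upstream sources), is a test helper that fails any App Engine API call that unexpectedly escapes a mock:

```go
package myapp

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine"
)

// withNoAPICalls returns a context whose App Engine API calls all fail with a
// descriptive error instead of reaching a real backend.
func withNoAPICalls(ctx context.Context) context.Context {
	return appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
		return fmt.Errorf("unexpected App Engine API call %s.%s", service, method)
	})
}
```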
diff --git a/vendor/google.golang.org/appengine/appengine_test.go b/vendor/google.golang.org/appengine/appengine_test.go
new file mode 100644
index 000000000..f1cf0a1b9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_test.go
@@ -0,0 +1,49 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+)
+
+func TestValidGeoPoint(t *testing.T) {
+ testCases := []struct {
+ desc string
+ pt GeoPoint
+ want bool
+ }{
+ {
+ "valid",
+ GeoPoint{67.21, 13.37},
+ true,
+ },
+ {
+ "high lat",
+ GeoPoint{-90.01, 13.37},
+ false,
+ },
+ {
+ "low lat",
+ GeoPoint{90.01, 13.37},
+ false,
+ },
+ {
+ "high lng",
+ GeoPoint{67.21, 182},
+ false,
+ },
+ {
+ "low lng",
+ GeoPoint{67.21, -181},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ if got := tc.pt.Valid(); got != tc.want {
+ t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 000000000..f4b645aad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+ return internal.BackgroundContext()
+}
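
A short sketch of how `BackgroundContext` might be used on the flexible environment: a goroutine that runs alongside request handling. The ticker interval and log message are illustrative only.

```go
package main

import (
	"time"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func main() {
	go func() {
		// Not tied to any request, so a background context is used.
		ctx := appengine.BackgroundContext()
		for range time.Tick(10 * time.Minute) {
			log.Infof(ctx, "background heartbeat")
		}
	}()
	appengine.Main()
}
```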
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore.go b/vendor/google.golang.org/appengine/blobstore/blobstore.go
new file mode 100644
index 000000000..1c8087b04
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore.go
@@ -0,0 +1,276 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package blobstore provides a client for App Engine's persistent blob
+// storage service.
+package blobstore // import "google.golang.org/appengine/blobstore"
+
+import (
+ "bufio"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/internal"
+
+ basepb "google.golang.org/appengine/internal/base"
+ blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+const (
+ blobInfoKind = "__BlobInfo__"
+ blobFileIndexKind = "__BlobFileIndex__"
+ zeroKey = appengine.BlobKey("")
+)
+
+// BlobInfo is the blob metadata that is stored in the datastore.
+// Filename may be empty.
+type BlobInfo struct {
+ BlobKey appengine.BlobKey
+ ContentType string `datastore:"content_type"`
+ CreationTime time.Time `datastore:"creation"`
+ Filename string `datastore:"filename"`
+ Size int64 `datastore:"size"`
+ MD5 string `datastore:"md5_hash"`
+
+ // ObjectName is the Google Cloud Storage name for this blob.
+ ObjectName string `datastore:"gs_object_name"`
+}
+
+// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch.
+//
+// The blobstore stores blob metadata in the datastore. When loading that
+// metadata, it may contain fields that we don't care about. datastore.Get will
+// return datastore.ErrFieldMismatch in that case, so we ignore that specific
+// error.
+func isErrFieldMismatch(err error) bool {
+ _, ok := err.(*datastore.ErrFieldMismatch)
+ return ok
+}
+
+// Stat returns the BlobInfo for a provided blobKey. If no blob was found for
+// that key, Stat returns datastore.ErrNoSuchEntity.
+func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) {
+ c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace
+ dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil)
+ bi := &BlobInfo{
+ BlobKey: blobKey,
+ }
+ if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) {
+ return nil, err
+ }
+ return bi, nil
+}
+
+// Send sets the headers on response to instruct App Engine to send a blob as
+// the response body. This is more efficient than reading and writing it out
+// manually and isn't subject to normal response size limits.
+func Send(response http.ResponseWriter, blobKey appengine.BlobKey) {
+ hdr := response.Header()
+ hdr.Set("X-AppEngine-BlobKey", string(blobKey))
+
+ if hdr.Get("Content-Type") == "" {
+ // This value is known to dev_appserver to mean automatic.
+ // In production this is remapped to the empty value which
+ // means automatic.
+ hdr.Set("Content-Type", "application/vnd.google.appengine.auto")
+ }
+}
+
+// UploadURL creates an upload URL for the form that the user will
+// fill out, passing the application path to load when the POST of the
+// form is completed. These URLs expire and should not be reused. The
+// opts parameter may be nil.
+func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) {
+ req := &blobpb.CreateUploadURLRequest{
+ SuccessPath: proto.String(successPath),
+ }
+ if opts != nil {
+ if n := opts.MaxUploadBytes; n != 0 {
+ req.MaxUploadSizeBytes = &n
+ }
+ if n := opts.MaxUploadBytesPerBlob; n != 0 {
+ req.MaxUploadSizePerBlobBytes = &n
+ }
+ if s := opts.StorageBucket; s != "" {
+ req.GsBucketName = &s
+ }
+ }
+ res := &blobpb.CreateUploadURLResponse{}
+ if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil {
+ return nil, err
+ }
+ return url.Parse(*res.Url)
+}
+
+// UploadURLOptions are the options to create an upload URL.
+type UploadURLOptions struct {
+ MaxUploadBytes int64 // optional
+ MaxUploadBytesPerBlob int64 // optional
+
+ // StorageBucket specifies the Google Cloud Storage bucket in which
+ // to store the blob.
+ // This is required if you use Cloud Storage instead of Blobstore.
+ // Your application must have permission to write to the bucket.
+ // You may optionally specify a bucket name and path in the format
+ // "bucket_name/path", in which case the included path will be the
+ // prefix of the uploaded object's name.
+ StorageBucket string
+}
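+
+// A hedged sketch of generating an upload URL, assuming ctx is an App Engine
+// request context; the "/upload" success path and the bucket name are
+// assumptions, not values defined by this package.
+//
+// opts := &blobstore.UploadURLOptions{StorageBucket: "my-bucket"}
+// uploadURL, err := blobstore.UploadURL(ctx, "/upload", opts)
+// if err != nil {
+// // handle error
+// }
+// // Serve an HTML form whose action is uploadURL.String() and whose enctype
+// // is multipart/form-data; App Engine POSTs to "/upload" once the blobs
+// // have been stored.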
+
+// Delete deletes a blob.
+func Delete(c context.Context, blobKey appengine.BlobKey) error {
+ return DeleteMulti(c, []appengine.BlobKey{blobKey})
+}
+
+// DeleteMulti deletes multiple blobs.
+func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error {
+ s := make([]string, len(blobKey))
+ for i, b := range blobKey {
+ s[i] = string(b)
+ }
+ req := &blobpb.DeleteBlobRequest{
+ BlobKey: s,
+ }
+ res := &basepb.VoidProto{}
+ if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func errorf(format string, args ...interface{}) error {
+ return fmt.Errorf("blobstore: "+format, args...)
+}
+
+// ParseUpload parses the synthetic POST request that your app gets from
+// App Engine after a user's successful upload of blobs. Given the request,
+// ParseUpload returns a map of the blobs received (keyed by HTML form
+// element name) and other non-blob POST parameters.
+func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) {
+ _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+ boundary := params["boundary"]
+ if boundary == "" {
+ return nil, nil, errorf("did not find MIME multipart boundary")
+ }
+
+ blobs = make(map[string][]*BlobInfo)
+ other = make(url.Values)
+
+ mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary)
+ for {
+ part, perr := mreader.NextPart()
+ if perr == io.EOF {
+ break
+ }
+ if perr != nil {
+ return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v",
+ boundary, len(boundary), perr)
+ }
+
+ bi := &BlobInfo{}
+ ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition"))
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.Filename = params["filename"]
+ formKey := params["name"]
+
+ ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.BlobKey = appengine.BlobKey(params["blob-key"])
+ if ctype != "message/external-body" || bi.BlobKey == "" {
+ if formKey != "" {
+ slurp, serr := ioutil.ReadAll(part)
+ if serr != nil {
+ return nil, nil, errorf("error reading %q MIME part", formKey)
+ }
+ other[formKey] = append(other[formKey], string(slurp))
+ }
+ continue
+ }
+
+ // App Engine sends a MIME header as the body of each MIME part.
+ tp := textproto.NewReader(bufio.NewReader(part))
+ header, mimeerr := tp.ReadMIMEHeader()
+ if mimeerr != nil {
+ return nil, nil, mimeerr
+ }
+ bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.ContentType = header.Get("Content-Type")
+
+ // Parse the time from the MIME header like:
+ // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136
+ createDate := header.Get("X-AppEngine-Upload-Creation")
+ if createDate == "" {
+ return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header")
+ }
+ bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate)
+ if err != nil {
+ return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err)
+ }
+
+ if hdr := header.Get("Content-MD5"); hdr != "" {
+ md5, err := base64.URLEncoding.DecodeString(hdr)
+ if err != nil {
+ return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err)
+ }
+ bi.MD5 = string(md5)
+ }
+
+ // If the GCS object name was provided, record it.
+ bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object")
+
+ blobs[formKey] = append(blobs[formKey], bi)
+ }
+ return
+}
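+
+// A hedged sketch of the handler registered at the upload success path; the
+// "file" form field name is an assumption.
+//
+// func handleUpload(w http.ResponseWriter, r *http.Request) {
+// blobs, other, err := blobstore.ParseUpload(r)
+// if err != nil {
+// http.Error(w, err.Error(), http.StatusInternalServerError)
+// return
+// }
+// _ = other // non-blob form values, if any
+// file := blobs["file"]
+// if len(file) == 0 {
+// http.Error(w, "no file uploaded", http.StatusBadRequest)
+// return
+// }
+// blobstore.Send(w, file[0].BlobKey)
+// }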
+
+// Reader is a blob reader.
+type Reader interface {
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+}
+
+// NewReader returns a reader for a blob. It always succeeds; if the blob does
+// not exist then an error will be reported upon first read.
+func NewReader(c context.Context, blobKey appengine.BlobKey) Reader {
+ return openBlob(c, blobKey)
+}
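+
+// Reading a blob's contents is plain io; a short sketch, assuming ctx and
+// blobKey are already in hand:
+//
+// rd := blobstore.NewReader(ctx, blobKey)
+// data, err := ioutil.ReadAll(rd)
+// if err != nil {
+// // a missing blob surfaces here, on the first read
+// }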
+
+// BlobKeyForFile returns a BlobKey for a Google Storage file.
+// The filename should be of the form "/gs/bucket_name/object_name".
+func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) {
+ req := &blobpb.CreateEncodedGoogleStorageKeyRequest{
+ Filename: &filename,
+ }
+ res := &blobpb.CreateEncodedGoogleStorageKeyResponse{}
+ if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil {
+ return "", err
+ }
+ return appengine.BlobKey(*res.BlobKey), nil
+}
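+
+// For example (the bucket and object names below are hypothetical):
+//
+// key, err := blobstore.BlobKeyForFile(ctx, "/gs/my-bucket/my-object")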
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore_test.go b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go
new file mode 100644
index 000000000..c2be7ef9b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go
@@ -0,0 +1,183 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package blobstore
+
+import (
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+
+ pb "google.golang.org/appengine/internal/blobstore"
+)
+
+const rbs = readBufferSize
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func fakeFetchData(req *pb.FetchDataRequest, res *pb.FetchDataResponse) error {
+ i0 := int(*req.StartIndex)
+ i1 := int(*req.EndIndex + 1) // Blobstore's end-indices are inclusive; Go's are exclusive.
+ bk := *req.BlobKey
+ if i := strings.Index(bk, "."); i != -1 {
+ // Strip everything past the ".".
+ bk = bk[:i]
+ }
+ switch bk {
+ case "a14p":
+ const s = "abcdefghijklmnop"
+ i0 := min(len(s), i0)
+ i1 := min(len(s), i1)
+ res.Data = []byte(s[i0:i1])
+ case "longBlob":
+ res.Data = make([]byte, i1-i0)
+ for i := range res.Data {
+ res.Data[i] = 'A' + uint8(i0/rbs)
+ i0++
+ }
+ }
+ return nil
+}
+
+// step is one step of a readerTest.
+// It consists of a Reader method to call, the method arguments
+// (lenp, offset, whence) and the expected results.
+type step struct {
+ method string
+ lenp int
+ offset int64
+ whence int
+ want string
+ wantErr error
+}
+
+var readerTest = []struct {
+ blobKey string
+ step []step
+}{
+ {"noSuchBlobKey", []step{
+ {"Read", 8, 0, 0, "", io.EOF},
+ }},
+ {"a14p.0", []step{
+ // Test basic reads.
+ {"Read", 1, 0, 0, "a", nil},
+ {"Read", 3, 0, 0, "bcd", nil},
+ {"Read", 1, 0, 0, "e", nil},
+ {"Read", 2, 0, 0, "fg", nil},
+ // Test Seek.
+ {"Seek", 0, 2, os.SEEK_SET, "2", nil},
+ {"Read", 5, 0, 0, "cdefg", nil},
+ {"Seek", 0, 2, os.SEEK_CUR, "9", nil},
+ {"Read", 1, 0, 0, "j", nil},
+ // Test reads up to and past EOF.
+ {"Read", 5, 0, 0, "klmno", nil},
+ {"Read", 5, 0, 0, "p", nil},
+ {"Read", 5, 0, 0, "", io.EOF},
+ // Test ReadAt.
+ {"ReadAt", 4, 0, 0, "abcd", nil},
+ {"ReadAt", 4, 3, 0, "defg", nil},
+ {"ReadAt", 4, 12, 0, "mnop", nil},
+ {"ReadAt", 4, 13, 0, "nop", io.EOF},
+ {"ReadAt", 4, 99, 0, "", io.EOF},
+ }},
+ {"a14p.1", []step{
+ // Test Seek before any reads.
+ {"Seek", 0, 2, os.SEEK_SET, "2", nil},
+ {"Read", 1, 0, 0, "c", nil},
+ // Test that ReadAt doesn't affect the Read offset.
+ {"ReadAt", 3, 9, 0, "jkl", nil},
+ {"Read", 3, 0, 0, "def", nil},
+ }},
+ {"a14p.2", []step{
+ // Test ReadAt before any reads or seeks.
+ {"ReadAt", 2, 14, 0, "op", nil},
+ }},
+ {"longBlob.0", []step{
+ // Test basic read.
+ {"Read", 1, 0, 0, "A", nil},
+ // Test that Read returns early when the buffer is exhausted.
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil},
+ {"Read", 5, 0, 0, "AA", nil},
+ {"Read", 3, 0, 0, "BBB", nil},
+ // Test that what we just read is still in the buffer.
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil},
+ {"Read", 5, 0, 0, "AABBB", nil},
+ // Test ReadAt.
+ {"ReadAt", 3, rbs - 4, 0, "AAA", nil},
+ {"ReadAt", 6, rbs - 4, 0, "AAAABB", nil},
+ {"ReadAt", 8, rbs - 4, 0, "AAAABBBB", nil},
+ {"ReadAt", 5, rbs - 4, 0, "AAAAB", nil},
+ {"ReadAt", 2, rbs - 4, 0, "AA", nil},
+ // Test seeking backwards from the Read offset.
+ {"Seek", 0, 2*rbs - 8, os.SEEK_SET, strconv.Itoa(2*rbs - 8), nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 8, 0, 0, "BBBBCCCC", nil},
+ }},
+ {"longBlob.1", []step{
+ // Test ReadAt with a slice larger than the buffer size.
+ {"LargeReadAt", 2*rbs - 2, 0, 0, strconv.Itoa(2*rbs - 2), nil},
+ {"LargeReadAt", 2*rbs - 1, 0, 0, strconv.Itoa(2*rbs - 1), nil},
+ {"LargeReadAt", 2*rbs + 0, 0, 0, strconv.Itoa(2*rbs + 0), nil},
+ {"LargeReadAt", 2*rbs + 1, 0, 0, strconv.Itoa(2*rbs + 1), nil},
+ {"LargeReadAt", 2*rbs + 2, 0, 0, strconv.Itoa(2*rbs + 2), nil},
+ {"LargeReadAt", 2*rbs - 2, 1, 0, strconv.Itoa(2*rbs - 2), nil},
+ {"LargeReadAt", 2*rbs - 1, 1, 0, strconv.Itoa(2*rbs - 1), nil},
+ {"LargeReadAt", 2*rbs + 0, 1, 0, strconv.Itoa(2*rbs + 0), nil},
+ {"LargeReadAt", 2*rbs + 1, 1, 0, strconv.Itoa(2*rbs + 1), nil},
+ {"LargeReadAt", 2*rbs + 2, 1, 0, strconv.Itoa(2*rbs + 2), nil},
+ }},
+}
+
+func TestReader(t *testing.T) {
+ for _, rt := range readerTest {
+ c := aetesting.FakeSingleContext(t, "blobstore", "FetchData", fakeFetchData)
+ r := NewReader(c, appengine.BlobKey(rt.blobKey))
+ for i, step := range rt.step {
+ var (
+ got string
+ gotErr error
+ n int
+ offset int64
+ )
+ switch step.method {
+ case "LargeReadAt":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.ReadAt(p, step.offset)
+ got = strconv.Itoa(n)
+ case "Read":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.Read(p)
+ got = string(p[:n])
+ case "ReadAt":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.ReadAt(p, step.offset)
+ got = string(p[:n])
+ case "Seek":
+ offset, gotErr = r.Seek(step.offset, step.whence)
+ got = strconv.FormatInt(offset, 10)
+ default:
+ t.Fatalf("unknown method: %s", step.method)
+ }
+ if gotErr != step.wantErr {
+ t.Fatalf("%s step %d: got error %v want %v", rt.blobKey, i, gotErr, step.wantErr)
+ }
+ if got != step.want {
+ t.Fatalf("%s step %d: got %q want %q", rt.blobKey, i, got, step.want)
+ }
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/read.go b/vendor/google.golang.org/appengine/blobstore/read.go
new file mode 100644
index 000000000..578b1f550
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/read.go
@@ -0,0 +1,160 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package blobstore
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+
+ blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+// openBlob returns a reader for a blob. It always succeeds; if the blob does
+// not exist then an error will be reported upon first read.
+func openBlob(c context.Context, blobKey appengine.BlobKey) Reader {
+ return &reader{
+ c: c,
+ blobKey: blobKey,
+ }
+}
+
+const readBufferSize = 256 * 1024
+
+// reader is a blob reader. It implements the Reader interface.
+type reader struct {
+ c context.Context
+
+ // Either blobKey or filename is set:
+ blobKey appengine.BlobKey
+ filename string
+
+ closeFunc func() // is nil if unavailable or already closed.
+
+ // buf is the read buffer. r is how much of buf has been read.
+ // off is the offset of buf[0] relative to the start of the blob.
+ // An invariant is 0 <= r && r <= len(buf).
+ // Reads that don't require an RPC call will increment r but not off.
+ // Seeks may modify r without discarding the buffer, but only if the
+ // invariant can be maintained.
+ mu sync.Mutex
+ buf []byte
+ r int
+ off int64
+}
+
+func (r *reader) Close() error {
+ if f := r.closeFunc; f != nil {
+ f()
+ }
+ r.closeFunc = nil
+ return nil
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.r == len(r.buf) {
+ if err := r.fetch(r.off + int64(r.r)); err != nil {
+ return 0, err
+ }
+ }
+ n := copy(p, r.buf[r.r:])
+ r.r += n
+ return n, nil
+}
+
+func (r *reader) ReadAt(p []byte, off int64) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ // Convert relative offsets to absolute offsets.
+ ab0 := r.off + int64(r.r)
+ ab1 := r.off + int64(len(r.buf))
+ ap0 := off
+ ap1 := off + int64(len(p))
+ // Check if we can satisfy the read entirely out of the existing buffer.
+ if r.off <= ap0 && ap1 <= ab1 {
+ // Convert off from an absolute offset to a relative offset.
+ rp0 := int(ap0 - r.off)
+ return copy(p, r.buf[rp0:]), nil
+ }
+ // Restore the original Read/Seek offset after ReadAt completes.
+ defer r.seek(ab0)
+ // Repeatedly fetch and copy until we have filled p.
+ n := 0
+ for len(p) > 0 {
+ if err := r.fetch(off + int64(n)); err != nil {
+ return n, err
+ }
+ r.r = copy(p, r.buf)
+ n += r.r
+ p = p[r.r:]
+ }
+ return n, nil
+}
+
+func (r *reader) Seek(offset int64, whence int) (ret int64, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ switch whence {
+ case os.SEEK_SET:
+ ret = offset
+ case os.SEEK_CUR:
+ ret = r.off + int64(r.r) + offset
+ case os.SEEK_END:
+ return 0, errors.New("seeking relative to the end of a blob isn't supported")
+ default:
+ return 0, fmt.Errorf("invalid Seek whence value: %d", whence)
+ }
+ if ret < 0 {
+ return 0, errors.New("negative Seek offset")
+ }
+ return r.seek(ret)
+}
+
+// fetch fetches readBufferSize bytes starting at the given offset. On success,
+// the data is saved as r.buf.
+func (r *reader) fetch(off int64) error {
+ req := &blobpb.FetchDataRequest{
+ BlobKey: proto.String(string(r.blobKey)),
+ StartIndex: proto.Int64(off),
+ EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive.
+ }
+ res := &blobpb.FetchDataResponse{}
+ if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil {
+ return err
+ }
+ if len(res.Data) == 0 {
+ return io.EOF
+ }
+ r.buf, r.r, r.off = res.Data, 0, off
+ return nil
+}
+
+// seek seeks to the given offset with an effective whence equal to SEEK_SET.
+// It discards the read buffer if the invariant cannot be maintained.
+func (r *reader) seek(off int64) (int64, error) {
+ delta := off - r.off
+ if delta >= 0 && delta < int64(len(r.buf)) {
+ r.r = int(delta)
+ return off, nil
+ }
+ r.buf, r.r, r.off = nil, 0, off
+ return off, nil
+}
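+
+// A worked illustration of the buffering above: after the first fetch at
+// offset 0, buf covers blob offsets [0, 256KiB) (or less, if the blob is
+// shorter). A Seek back to offset 100 only sets r = 100 and keeps the buffer,
+// while a Seek to 300000 falls outside [off, off+len(buf)) and discards it;
+// the next Read then triggers fetch(300000).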
diff --git a/vendor/google.golang.org/appengine/capability/capability.go b/vendor/google.golang.org/appengine/capability/capability.go
new file mode 100644
index 000000000..3a60bd55f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/capability/capability.go
@@ -0,0 +1,52 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package capability exposes information about outages and scheduled downtime
+for specific API capabilities.
+
+This package does not work in App Engine "flexible environment".
+
+Example:
+ if !capability.Enabled(c, "datastore_v3", "write") {
+ // show user a different page
+ }
+*/
+package capability // import "google.golang.org/appengine/capability"
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/log"
+
+ pb "google.golang.org/appengine/internal/capability"
+)
+
+// Enabled returns whether an API's capabilities are enabled.
+// The wildcard "*" capability matches every capability of an API.
+// If the underlying RPC fails (if the package is unknown, for example),
+// false is returned and information is written to the application log.
+func Enabled(ctx context.Context, api, capability string) bool {
+ req := &pb.IsEnabledRequest{
+ Package: &api,
+ Capability: []string{capability},
+ }
+ res := &pb.IsEnabledResponse{}
+ if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil {
+ log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err)
+ return false
+ }
+ switch *res.SummaryStatus {
+ case pb.IsEnabledResponse_ENABLED,
+ pb.IsEnabledResponse_SCHEDULED_FUTURE,
+ pb.IsEnabledResponse_SCHEDULED_NOW:
+ return true
+ case pb.IsEnabledResponse_UNKNOWN:
+ log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability)
+ return false
+ default:
+ return false
+ }
+}
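+
+// A hedged sketch of the wildcard form, which asks about every capability of
+// the named API at once:
+//
+// allOK := capability.Enabled(ctx, "datastore_v3", "*")
+// if !allOK {
+// // not every datastore capability is currently enabled
+// }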
diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go
new file mode 100644
index 000000000..96945f6d6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel.go
@@ -0,0 +1,87 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package channel implements the server side of App Engine's Channel API.
+
+Create creates a new channel associated with the given clientID,
+which must be unique to the client that will use the returned token.
+
+ token, err := channel.Create(c, "player1")
+ if err != nil {
+ // handle error
+ }
+ // return token to the client in an HTTP response
+
+Send sends a message to the client over the channel identified by clientID.
+
+ channel.Send(c, "player1", "Game over!")
+
+Deprecated: The Channel API has been deprecated and will be removed. See the Channel API Turndown document for details and a timetable.
+
+https://cloud.google.com/appengine/docs/deprecations/channel
+*/
+package channel // import "google.golang.org/appengine/channel"
+
+import (
+ "encoding/json"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/channel"
+)
+
+// Create creates a channel and returns a token for use by the client.
+// The clientID is an application-provided string used to identify the client.
+func Create(c context.Context, clientID string) (token string, err error) {
+ req := &pb.CreateChannelRequest{
+ ApplicationKey: &clientID,
+ }
+ resp := &pb.CreateChannelResponse{}
+ err = internal.Call(c, service, "CreateChannel", req, resp)
+ token = resp.GetToken()
+ return token, remapError(err)
+}
+
+// Send sends a message on the channel associated with clientID.
+func Send(c context.Context, clientID, message string) error {
+ req := &pb.SendMessageRequest{
+ ApplicationKey: &clientID,
+ Message: &message,
+ }
+ resp := &basepb.VoidProto{}
+ return remapError(internal.Call(c, service, "SendChannelMessage", req, resp))
+}
+
+// SendJSON is a helper function that sends a JSON-encoded value
+// on the channel associated with clientID.
+func SendJSON(c context.Context, clientID string, value interface{}) error {
+ m, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ return Send(c, clientID, string(m))
+}
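+
+// A short sketch of SendJSON; the payload shape is illustrative only:
+//
+// if err := channel.SendJSON(c, "player1", map[string]int{"score": 42}); err != nil {
+// // handle error
+// }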
+
+// remapError rewrites any APIError referencing "xmpp" so that it references "channel" instead.
+func remapError(err error) error {
+ if e, ok := err.(*internal.APIError); ok {
+ if e.Service == "xmpp" {
+ e.Service = "channel"
+ }
+ }
+ return err
+}
+
+var service = "xmpp" // prod
+
+func init() {
+ if appengine.IsDevAppServer() {
+ service = "channel" // dev
+ }
+ internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/channel/channel_test.go b/vendor/google.golang.org/appengine/channel/channel_test.go
new file mode 100644
index 000000000..c7498eb83
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package channel
+
+import (
+ "testing"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestRemapError(t *testing.T) {
+ err := &internal.APIError{
+ Service: "xmpp",
+ }
+ err = remapError(err).(*internal.APIError)
+ if err.Service != "channel" {
+ t.Errorf("err.Service = %q, want %q", err.Service, "channel")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
new file mode 100644
index 000000000..7b27e6b12
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
@@ -0,0 +1,62 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package cloudsql exposes access to Google Cloud SQL databases.
+
+This package does not work in App Engine "flexible environment".
+
+This package is intended for MySQL drivers to make App Engine-specific
+connections. Applications should use this package through database/sql:
+Select a pure Go MySQL driver that supports this package, and use sql.Open
+with protocol "cloudsql" and an address of the Cloud SQL instance.
+
+A Go MySQL driver that has been tested to work well with Cloud SQL
+is the go-sql-driver:
+ import "database/sql"
+ import _ "github.com/go-sql-driver/mysql"
+
+ db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")
+
+Another driver that works well with Cloud SQL is the mymysql driver:
+ import "database/sql"
+ import _ "github.com/ziutek/mymysql/godrv"
+
+ db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password")
+
+Using either of these drivers, you can perform a standard SQL query.
+This example assumes there is a table named 'users' with
+columns 'first_name' and 'last_name':
+
+ rows, err := db.Query("SELECT first_name, last_name FROM users")
+ if err != nil {
+ log.Errorf(ctx, "db.Query: %v", err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var firstName string
+ var lastName string
+ if err := rows.Scan(&firstName, &lastName); err != nil {
+ log.Errorf(ctx, "rows.Scan: %v", err)
+ continue
+ }
+ log.Infof(ctx, "First: %v - Last: %v", firstName, lastName)
+ }
+ if err := rows.Err(); err != nil {
+ log.Errorf(ctx, "Row error: %v", err)
+ }
+*/
+package cloudsql
+
+import (
+ "net"
+)
+
+// Dial connects to the named Cloud SQL instance.
+func Dial(instance string) (net.Conn, error) {
+ return connect(instance)
+}
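+
+// Most applications go through database/sql as shown in the package comment;
+// a driver (or a test) could also dial the instance directly. The instance
+// name below is hypothetical:
+//
+// conn, err := cloudsql.Dial("project-id:instance-name")
+// if err != nil {
+// // handle error
+// }
+// defer conn.Close()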
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
new file mode 100644
index 000000000..af62dba14
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
@@ -0,0 +1,17 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package cloudsql
+
+import (
+ "net"
+
+ "appengine/cloudsql"
+)
+
+func connect(instance string) (net.Conn, error) {
+ return cloudsql.Dial(instance)
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
new file mode 100644
index 000000000..90fa7b31e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package cloudsql
+
+import (
+ "errors"
+ "net"
+)
+
+func connect(instance string) (net.Conn, error) {
+ return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`)
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
new file mode 100644
index 000000000..c66849e83
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
@@ -0,0 +1,342 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Program aebundler turns a Go app into a fully self-contained tar file.
+// The app and its subdirectories (if any) are placed under "."
+// and the dependencies from $GOPATH are placed under ./_gopath/src.
+// A main func is synthesized if one does not exist.
+//
+// A sample Dockerfile to be used with this bundler could look like this:
+// FROM gcr.io/google-appengine/go-compat
+// ADD . /app
+// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe
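+//
+// A typical invocation (the output file name is arbitrary) bundles the app
+// in the current directory:
+// aebundler -o app.tar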
+package main
+
+import (
+ "archive/tar"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ output = flag.String("o", "", "name of output tar file or '-' for stdout")
+ rootDir = flag.String("root", ".", "directory name of application root")
+ vm = flag.Bool("vm", true, `bundle an app for App Engine "flexible environment"`)
+
+ skipFiles = map[string]bool{
+ ".git": true,
+ ".gitconfig": true,
+ ".hg": true,
+ ".travis.yml": true,
+ }
+)
+
+const (
+ newMain = `package main
+import "google.golang.org/appengine"
+func main() {
+ appengine.Main()
+}
+`
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s -o <file.tar|->\tBundle app to named tar file or stdout\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\noptional arguments:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ var tags []string
+ if *vm {
+ tags = append(tags, "appenginevm")
+ } else {
+ tags = append(tags, "appengine")
+ }
+
+ tarFile := *output
+ if tarFile == "" {
+ usage()
+ errorf("Required -o flag not specified.")
+ }
+
+ app, err := analyze(tags)
+ if err != nil {
+ errorf("Error analyzing app: %v", err)
+ }
+ if err := app.bundle(tarFile); err != nil {
+ errorf("Unable to bundle app: %v", err)
+ }
+}
+
+// errorf prints the error message and exits.
+func errorf(format string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...)
+ os.Exit(1)
+}
+
+type app struct {
+ hasMain bool
+ appFiles []string
+ imports map[string]string
+}
+
+// analyze inspects the app using the given build tags and returns whether it
+// has a main func, its app files, and a map of full directory import names to
+// original import names.
+func analyze(tags []string) (*app, error) {
+ ctxt := buildContext(tags)
+ hasMain, appFiles, err := checkMain(ctxt)
+ if err != nil {
+ return nil, err
+ }
+ gopath := filepath.SplitList(ctxt.GOPATH)
+ im, err := imports(ctxt, *rootDir, gopath)
+ return &app{
+ hasMain: hasMain,
+ appFiles: appFiles,
+ imports: im,
+ }, err
+}
+
+// buildContext returns the context for building the source.
+func buildContext(tags []string) *build.Context {
+ return &build.Context{
+ GOARCH: build.Default.GOARCH,
+ GOOS: build.Default.GOOS,
+ GOROOT: build.Default.GOROOT,
+ GOPATH: build.Default.GOPATH,
+ Compiler: build.Default.Compiler,
+ BuildTags: append(build.Default.BuildTags, tags...),
+ }
+}
+
+// bundle bundles the app into the named tarFile ("-"==stdout).
+func (s *app) bundle(tarFile string) (err error) {
+ var out io.Writer
+ if tarFile == "-" {
+ out = os.Stdout
+ } else {
+ f, err := os.Create(tarFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ out = f
+ }
+ tw := tar.NewWriter(out)
+
+ for srcDir, importName := range s.imports {
+ dstDir := "_gopath/src/" + importName
+ if err = copyTree(tw, dstDir, srcDir); err != nil {
+ return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err)
+ }
+ }
+ if err := copyTree(tw, ".", *rootDir); err != nil {
+ return fmt.Errorf("unable to copy root directory to /app: %v", err)
+ }
+ if !s.hasMain {
+ if err := synthesizeMain(tw, s.appFiles); err != nil {
+ return fmt.Errorf("unable to synthesize new main func: %v", err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ return fmt.Errorf("unable to close tar file %v: %v", tarFile, err)
+ }
+ return nil
+}
+
+// synthesizeMain generates a new main func and writes it to the tarball.
+func synthesizeMain(tw *tar.Writer, appFiles []string) error {
+ appMap := make(map[string]bool)
+ for _, f := range appFiles {
+ appMap[f] = true
+ }
+ var f string
+ for i := 0; i < 100; i++ {
+ f = fmt.Sprintf("app_main%d.go", i)
+ if !appMap[filepath.Join(*rootDir, f)] {
+ break
+ }
+ }
+ if appMap[filepath.Join(*rootDir, f)] {
+ return fmt.Errorf("unable to find unique name for %v", f)
+ }
+ hdr := &tar.Header{
+ Name: f,
+ Mode: 0644,
+ Size: int64(len(newMain)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", f, err)
+ }
+ if _, err := tw.Write([]byte(newMain)); err != nil {
+ return fmt.Errorf("unable to write %v to tar file: %v", f, err)
+ }
+ return nil
+}
+
+// imports returns a map of all import directories (recursively) used by the app.
+// The return value maps full directory names to original import names.
+func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {
+ pkg, err := ctxt.ImportDir(srcDir, 0)
+ if err != nil {
+ return nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+
+ // Resolve all non-standard-library imports
+ result := make(map[string]string)
+ for _, v := range pkg.Imports {
+ if !strings.Contains(v, ".") {
+ continue
+ }
+ src, err := findInGopath(v, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err)
+ }
+ result[src] = v
+ im, err := imports(ctxt, src, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse package %v: %v", src, err)
+ }
+ for k, v := range im {
+ result[k] = v
+ }
+ }
+ return result, nil
+}
+
+// findInGopath searches the gopath for the named import directory.
+func findInGopath(dir string, gopath []string) (string, error) {
+ for _, v := range gopath {
+ dst := filepath.Join(v, "src", dir)
+ if _, err := os.Stat(dst); err == nil {
+ return dst, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath)
+}
+
+// copyTree copies srcDir to tar file dstDir, ignoring skipFiles.
+func copyTree(tw *tar.Writer, dstDir, srcDir string) error {
+ entries, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return fmt.Errorf("unable to read dir %v: %v", srcDir, err)
+ }
+ for _, entry := range entries {
+ n := entry.Name()
+ if skipFiles[n] {
+ continue
+ }
+ s := filepath.Join(srcDir, n)
+ d := filepath.Join(dstDir, n)
+ if entry.IsDir() {
+ if err := copyTree(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ continue
+ }
+ if err := copyFile(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy file %v to %v: %v", s, d, err)
+ }
+ }
+ return nil
+}
+
+// copyFile copies src to tar file dst.
+func copyFile(tw *tar.Writer, dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("unable to open %v: %v", src, err)
+ }
+ defer s.Close()
+ fi, err := s.Stat()
+ if err != nil {
+ return fmt.Errorf("unable to stat %v: %v", src, err)
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, dst)
+ if err != nil {
+ return fmt.Errorf("unable to create tar header for %v: %v", dst, err)
+ }
+ hdr.Name = dst
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", dst, err)
+ }
+ _, err = io.Copy(tw, s)
+ if err != nil {
+ return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err)
+ }
+ return nil
+}
+
+// checkMain verifies that there is a single "main" function.
+// It also returns a list of all Go source files in the app.
+func checkMain(ctxt *build.Context) (bool, []string, error) {
+ pkg, err := ctxt.ImportDir(*rootDir, 0)
+ if err != nil {
+ return false, nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+ if !pkg.IsCommand() {
+ errorf("Your app's package needs to be changed from %q to \"main\".\n", pkg.Name)
+ }
+ // Search for a "func main"
+ var hasMain bool
+ var appFiles []string
+ for _, f := range pkg.GoFiles {
+ n := filepath.Join(*rootDir, f)
+ appFiles = append(appFiles, n)
+ if hasMain, err = readFile(n); err != nil {
+ return false, nil, fmt.Errorf("error parsing %q: %v", n, err)
+ }
+ }
+ return hasMain, appFiles, nil
+}
+
+// isMain returns whether the given function declaration is a main function.
+// Such a function must be called "main", not have a receiver, and have no arguments or return types.
+func isMain(f *ast.FuncDecl) bool {
+ ft := f.Type
+ return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0
+}
+
+// readFile reads and parses the Go source code file and returns whether it has a main function.
+func readFile(filename string) (hasMain bool, err error) {
+ var src []byte
+ src, err = ioutil.ReadFile(filename)
+ if err != nil {
+ return
+ }
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ for _, decl := range file.Decls {
+ funcDecl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !isMain(funcDecl) {
+ continue
+ }
+ hasMain = true
+ break
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
new file mode 100644
index 000000000..8093c93ff
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
@@ -0,0 +1,72 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Program aedeploy assists with deploying App Engine "flexible environment" Go apps to production.
+// A temporary directory is created; the app, its subdirectories, and all its
+// dependencies from $GOPATH are copied into the directory; then the app
+// is deployed to production with the provided command.
+//
+// The app must be in "package main".
+//
+// This command must be issued from within the root directory of the app
+// (where the app.yaml file is located).
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0])
+}
+
+var verbose bool
+
+// vlogf logs to stderr if the "-v" flag is provided.
+func vlogf(f string, v ...interface{}) {
+ if !verbose {
+ return
+ }
+ log.Printf("[aedeploy] "+f, v...)
+}
+
+func main() {
+ flag.BoolVar(&verbose, "v", false, "Verbose logging.")
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() < 1 {
+ usage()
+ os.Exit(1)
+ }
+
+ notice := func() {
+ fmt.Fprintln(os.Stderr, `NOTICE: aedeploy is deprecated. Just use "gcloud app deploy".`)
+ }
+
+ notice()
+ if err := deploy(); err != nil {
+ fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err)
+ notice()
+ fmt.Fprintln(os.Stderr, `You might need to update gcloud. Run "gcloud components update".`)
+ os.Exit(1)
+ }
+ notice() // Make sure they see it at the end.
+}
+
+// deploy calls the provided command to deploy the app from the temporary directory.
+func deploy() error {
+ vlogf("Running command %v", flag.Args())
+ cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae.go b/vendor/google.golang.org/appengine/cmd/aefix/ae.go
new file mode 100644
index 000000000..0fe2d4ae9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/ae.go
@@ -0,0 +1,185 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ ctxPackage = "golang.org/x/net/context"
+
+ newPackageBase = "google.golang.org/"
+ stutterPackage = false
+)
+
+func init() {
+ register(fix{
+ "ae",
+ "2016-04-15",
+ aeFn,
+ `Update old App Engine APIs to new App Engine APIs`,
+ })
+}
+
+// logMethod is the set of methods on appengine.Context used for logging.
+var logMethod = map[string]bool{
+ "Debugf": true,
+ "Infof": true,
+ "Warningf": true,
+ "Errorf": true,
+ "Criticalf": true,
+}
+
+// mapPackage turns "appengine" into "google.golang.org/appengine", etc.
+func mapPackage(s string) string {
+ if stutterPackage {
+ s += "/" + path.Base(s)
+ }
+ return newPackageBase + s
+}
+
+func aeFn(f *ast.File) bool {
+ // During the walk, we track the last thing seen that looks like
+ // an appengine.Context, and reset it once the walk leaves a func.
+ var lastContext *ast.Ident
+
+ fixed := false
+
+ // Update imports.
+ mainImp := "appengine"
+ for _, imp := range f.Imports {
+ pth, _ := strconv.Unquote(imp.Path.Value)
+ if pth == "appengine" || strings.HasPrefix(pth, "appengine/") {
+ newPth := mapPackage(pth)
+ imp.Path.Value = strconv.Quote(newPth)
+ fixed = true
+
+ if pth == "appengine" {
+ mainImp = newPth
+ }
+ }
+ }
+
+ // Update any API changes.
+ walk(f, func(n interface{}) {
+ if ft, ok := n.(*ast.FuncType); ok && ft.Params != nil {
+ // See if this func has an `appengine.Context arg`.
+ // If so, remember its identifier.
+ for _, param := range ft.Params.List {
+ if !isPkgDot(param.Type, "appengine", "Context") {
+ continue
+ }
+ if len(param.Names) == 1 {
+ lastContext = param.Names[0]
+ break
+ }
+ }
+ return
+ }
+
+ if as, ok := n.(*ast.AssignStmt); ok {
+ if len(as.Lhs) == 1 && len(as.Rhs) == 1 {
+ // If this node is an assignment from an appengine.NewContext invocation,
+ // remember the identifier on the LHS.
+ if isCall(as.Rhs[0], "appengine", "NewContext") {
+ if ident, ok := as.Lhs[0].(*ast.Ident); ok {
+ lastContext = ident
+ return
+ }
+ }
+ // x (=|:=) appengine.Timeout(y, z)
+ // should become
+ // x, _ (=|:=) context.WithTimeout(y, z)
+ if isCall(as.Rhs[0], "appengine", "Timeout") {
+ addImport(f, ctxPackage)
+ as.Lhs = append(as.Lhs, ast.NewIdent("_"))
+ // isCall already did the type checking.
+ sel := as.Rhs[0].(*ast.CallExpr).Fun.(*ast.SelectorExpr)
+ sel.X = ast.NewIdent("context")
+ sel.Sel = ast.NewIdent("WithTimeout")
+ fixed = true
+ return
+ }
+ }
+ return
+ }
+
+ // If this node is a FuncDecl, we've finished the function, so reset lastContext.
+ if _, ok := n.(*ast.FuncDecl); ok {
+ lastContext = nil
+ return
+ }
+
+ if call, ok := n.(*ast.CallExpr); ok {
+ if isPkgDot(call.Fun, "appengine", "Datacenter") && len(call.Args) == 0 {
+ insertContext(f, call, lastContext)
+ fixed = true
+ return
+ }
+ if isPkgDot(call.Fun, "taskqueue", "QueueStats") && len(call.Args) == 3 {
+ call.Args = call.Args[:2] // drop last arg
+ fixed = true
+ return
+ }
+
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return
+ }
+ if lastContext != nil && refersTo(sel.X, lastContext) && logMethod[sel.Sel.Name] {
+ // c.Errorf(...)
+ // should become
+ // log.Errorf(c, ...)
+ addImport(f, mapPackage("appengine/log"))
+ sel.X = &ast.Ident{ // ast.NewIdent doesn't preserve the position.
+ NamePos: sel.X.Pos(),
+ Name: "log",
+ }
+ insertContext(f, call, lastContext)
+ fixed = true
+ return
+ }
+ }
+ })
+
+ // Change any `appengine.Context` to `context.Context`.
+ // Do this in a separate walk because the previous walk
+ // wants to identify "appengine.Context".
+ walk(f, func(n interface{}) {
+ expr, ok := n.(ast.Expr)
+ if ok && isPkgDot(expr, "appengine", "Context") {
+ addImport(f, ctxPackage)
+ // isPkgDot did the type checking.
+ n.(*ast.SelectorExpr).X.(*ast.Ident).Name = "context"
+ fixed = true
+ return
+ }
+ })
+
+ // The changes above might remove the need to import "appengine".
+ // Check if it's used, and drop it if it isn't.
+ if fixed && !usesImport(f, mainImp) {
+ deleteImport(f, mainImp)
+ }
+
+ return fixed
+}
+
+// ctx may be nil.
+func insertContext(f *ast.File, call *ast.CallExpr, ctx *ast.Ident) {
+ if ctx == nil {
+ // context is unknown, so use a plain "ctx".
+ ctx = ast.NewIdent("ctx")
+ } else {
+ // Create a fresh *ast.Ident so we drop the position information.
+ ctx = ast.NewIdent(ctx.Name)
+ }
+
+ call.Args = append([]ast.Expr{ctx}, call.Args...)
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go
new file mode 100644
index 000000000..21f5695b9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go
@@ -0,0 +1,144 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package main
+
+func init() {
+ addTestCases(aeTests, nil)
+}
+
+var aeTests = []testCase{
+ // Collection of fixes:
+ // - imports
+ // - appengine.Timeout -> context.WithTimeout
+ // - add ctx arg to appengine.Datacenter
+ // - logging API
+ {
+ Name: "ae.0",
+ In: `package foo
+
+import (
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func f(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ c = appengine.Timeout(c, 5*time.Second)
+ err := datastore.ErrNoSuchEntity
+ c.Errorf("Something interesting happened: %v", err)
+ _ = appengine.Datacenter()
+}
+`,
+ Out: `package foo
+
+import (
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+)
+
+func f(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ c, _ = context.WithTimeout(c, 5*time.Second)
+ err := datastore.ErrNoSuchEntity
+ log.Errorf(c, "Something interesting happened: %v", err)
+ _ = appengine.Datacenter(c)
+}
+`,
+ },
+
+ // Updating a function that takes an appengine.Context arg.
+ {
+ Name: "ae.1",
+ In: `package foo
+
+import (
+ "appengine"
+)
+
+func LogSomething(c2 appengine.Context) {
+ c2.Warningf("Stand back! I'm going to try science!")
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/log"
+)
+
+func LogSomething(c2 context.Context) {
+ log.Warningf(c2, "Stand back! I'm going to try science!")
+}
+`,
+ },
+
+ // Less widely used API changes:
+ // - drop maxTasks arg to taskqueue.QueueStats
+ {
+ Name: "ae.2",
+ In: `package foo
+
+import (
+ "appengine"
+ "appengine/taskqueue"
+)
+
+func f(ctx appengine.Context) {
+ stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}, 0)
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/taskqueue"
+)
+
+func f(ctx context.Context) {
+ stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"})
+}
+`,
+ },
+
+ // Check that the main "appengine" import will not be dropped
+ // if an appengine.Context -> context.Context change happens
+ // but the appengine package is still referenced.
+ {
+ Name: "ae.3",
+ In: `package foo
+
+import (
+ "appengine"
+ "io"
+)
+
+func f(ctx appengine.Context, w io.Writer) {
+ _ = appengine.IsDevAppServer()
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "io"
+)
+
+func f(ctx context.Context, w io.Writer) {
+ _ = appengine.IsDevAppServer()
+}
+`,
+ },
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/fix.go b/vendor/google.golang.org/appengine/cmd/aefix/fix.go
new file mode 100644
index 000000000..a100be794
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/fix.go
@@ -0,0 +1,848 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type fix struct {
+ name string
+ date string // date that fix was introduced, in YYYY-MM-DD format
+ f func(*ast.File) bool
+ desc string
+}
+
+// main runs sort.Sort(byName(fixes)) before printing the list of fixes.
+type byName []fix
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f byName) Less(i, j int) bool { return f[i].name < f[j].name }
+
+// main runs sort.Sort(byDate(fixes)) before applying fixes.
+type byDate []fix
+
+func (f byDate) Len() int { return len(f) }
+func (f byDate) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f byDate) Less(i, j int) bool { return f[i].date < f[j].date }
+
+var fixes []fix
+
+func register(f fix) {
+ fixes = append(fixes, f)
+}
+
+// walk traverses the AST x, calling visit(y) for each node y in the tree but
+// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt,
+// in a bottom-up traversal.
+func walk(x interface{}, visit func(interface{})) {
+ walkBeforeAfter(x, nop, visit)
+}
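+
+// A sketch of how the fixes in this package use walk (see aeFn in ae.go):
+// the visit function type-switches on the nodes it cares about.
+//
+// walk(f, func(n interface{}) {
+// if call, ok := n.(*ast.CallExpr); ok {
+// // inspect or rewrite the call expression
+// _ = call
+// }
+// })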
+
+func nop(interface{}) {}
+
+// walkBeforeAfter is like walk but calls before(x) before traversing
+// x's children and after(x) afterward.
+func walkBeforeAfter(x interface{}, before, after func(interface{})) {
+ before(x)
+
+ switch n := x.(type) {
+ default:
+ panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x))
+
+ case nil:
+
+ // pointers to interfaces
+ case *ast.Decl:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Expr:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Spec:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Stmt:
+ walkBeforeAfter(*n, before, after)
+
+ // pointers to struct pointers
+ case **ast.BlockStmt:
+ walkBeforeAfter(*n, before, after)
+ case **ast.CallExpr:
+ walkBeforeAfter(*n, before, after)
+ case **ast.FieldList:
+ walkBeforeAfter(*n, before, after)
+ case **ast.FuncType:
+ walkBeforeAfter(*n, before, after)
+ case **ast.Ident:
+ walkBeforeAfter(*n, before, after)
+ case **ast.BasicLit:
+ walkBeforeAfter(*n, before, after)
+
+ // pointers to slices
+ case *[]ast.Decl:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Expr:
+ walkBeforeAfter(*n, before, after)
+ case *[]*ast.File:
+ walkBeforeAfter(*n, before, after)
+ case *[]*ast.Ident:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Spec:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Stmt:
+ walkBeforeAfter(*n, before, after)
+
+ // These are ordered and grouped to match ../../pkg/go/ast/ast.go
+ case *ast.Field:
+ walkBeforeAfter(&n.Names, before, after)
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Tag, before, after)
+ case *ast.FieldList:
+ for _, field := range n.List {
+ walkBeforeAfter(field, before, after)
+ }
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.Ellipsis:
+ walkBeforeAfter(&n.Elt, before, after)
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.CompositeLit:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Elts, before, after)
+ case *ast.ParenExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.SelectorExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.IndexExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Index, before, after)
+ case *ast.SliceExpr:
+ walkBeforeAfter(&n.X, before, after)
+ if n.Low != nil {
+ walkBeforeAfter(&n.Low, before, after)
+ }
+ if n.High != nil {
+ walkBeforeAfter(&n.High, before, after)
+ }
+ case *ast.TypeAssertExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Type, before, after)
+ case *ast.CallExpr:
+ walkBeforeAfter(&n.Fun, before, after)
+ walkBeforeAfter(&n.Args, before, after)
+ case *ast.StarExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.UnaryExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.BinaryExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Y, before, after)
+ case *ast.KeyValueExpr:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+
+ case *ast.ArrayType:
+ walkBeforeAfter(&n.Len, before, after)
+ walkBeforeAfter(&n.Elt, before, after)
+ case *ast.StructType:
+ walkBeforeAfter(&n.Fields, before, after)
+ case *ast.FuncType:
+ walkBeforeAfter(&n.Params, before, after)
+ if n.Results != nil {
+ walkBeforeAfter(&n.Results, before, after)
+ }
+ case *ast.InterfaceType:
+ walkBeforeAfter(&n.Methods, before, after)
+ case *ast.MapType:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ case *ast.ChanType:
+ walkBeforeAfter(&n.Value, before, after)
+
+ case *ast.BadStmt:
+ case *ast.DeclStmt:
+ walkBeforeAfter(&n.Decl, before, after)
+ case *ast.EmptyStmt:
+ case *ast.LabeledStmt:
+ walkBeforeAfter(&n.Stmt, before, after)
+ case *ast.ExprStmt:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.SendStmt:
+ walkBeforeAfter(&n.Chan, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ case *ast.IncDecStmt:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.AssignStmt:
+ walkBeforeAfter(&n.Lhs, before, after)
+ walkBeforeAfter(&n.Rhs, before, after)
+ case *ast.GoStmt:
+ walkBeforeAfter(&n.Call, before, after)
+ case *ast.DeferStmt:
+ walkBeforeAfter(&n.Call, before, after)
+ case *ast.ReturnStmt:
+ walkBeforeAfter(&n.Results, before, after)
+ case *ast.BranchStmt:
+ case *ast.BlockStmt:
+ walkBeforeAfter(&n.List, before, after)
+ case *ast.IfStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Cond, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ walkBeforeAfter(&n.Else, before, after)
+ case *ast.CaseClause:
+ walkBeforeAfter(&n.List, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.SwitchStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Tag, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.TypeSwitchStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Assign, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.CommClause:
+ walkBeforeAfter(&n.Comm, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.SelectStmt:
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.ForStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Cond, before, after)
+ walkBeforeAfter(&n.Post, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.RangeStmt:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+
+ case *ast.ImportSpec:
+ case *ast.ValueSpec:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Values, before, after)
+ walkBeforeAfter(&n.Names, before, after)
+ case *ast.TypeSpec:
+ walkBeforeAfter(&n.Type, before, after)
+
+ case *ast.BadDecl:
+ case *ast.GenDecl:
+ walkBeforeAfter(&n.Specs, before, after)
+ case *ast.FuncDecl:
+ if n.Recv != nil {
+ walkBeforeAfter(&n.Recv, before, after)
+ }
+ walkBeforeAfter(&n.Type, before, after)
+ if n.Body != nil {
+ walkBeforeAfter(&n.Body, before, after)
+ }
+
+ case *ast.File:
+ walkBeforeAfter(&n.Decls, before, after)
+
+ case *ast.Package:
+ walkBeforeAfter(&n.Files, before, after)
+
+ case []*ast.File:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Decl:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Expr:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []*ast.Ident:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Stmt:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Spec:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ }
+ after(x)
+}
+
+// imports returns true if f imports path.
+func imports(f *ast.File, path string) bool {
+ return importSpec(f, path) != nil
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if importPath(s) == path {
+ return s
+ }
+ }
+ return nil
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+// declImports reports whether gen contains an import of path.
+func declImports(gen *ast.GenDecl, path string) bool {
+ if gen.Tok != token.IMPORT {
+ return false
+ }
+ for _, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if importPath(impspec) == path {
+ return true
+ }
+ }
+ return false
+}
+
+// isPkgDot returns true if t is the expression "pkg.name"
+// where pkg is an imported identifier.
+func isPkgDot(t ast.Expr, pkg, name string) bool {
+ sel, ok := t.(*ast.SelectorExpr)
+ return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name
+}
+
+// isPtrPkgDot returns true if f is the expression "*pkg.name"
+// where pkg is an imported identifier.
+func isPtrPkgDot(t ast.Expr, pkg, name string) bool {
+ ptr, ok := t.(*ast.StarExpr)
+ return ok && isPkgDot(ptr.X, pkg, name)
+}
+
+// isTopName returns true if n is a top-level unresolved identifier with the given name.
+func isTopName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.Name == name && id.Obj == nil
+}
+
+// isName returns true if n is an identifier with the given name.
+func isName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.String() == name
+}
+
+// isCall returns true if t is a call to pkg.name.
+func isCall(t ast.Expr, pkg, name string) bool {
+ call, ok := t.(*ast.CallExpr)
+ return ok && isPkgDot(call.Fun, pkg, name)
+}
+
+// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil.
+func isIdent(n interface{}) *ast.Ident {
+ id, _ := n.(*ast.Ident)
+ return id
+}
+
+// refersTo returns true if n is a reference to the same object as x.
+func refersTo(n ast.Node, x *ast.Ident) bool {
+ id, ok := n.(*ast.Ident)
+ // The test of id.Name == x.Name handles top-level unresolved
+ // identifiers, which all have Obj == nil.
+ return ok && id.Obj == x.Obj && id.Name == x.Name
+}
+
+// isBlank returns true if n is the blank identifier.
+func isBlank(n ast.Expr) bool {
+ return isName(n, "_")
+}
+
+// isEmptyString returns true if n is an empty string literal.
+func isEmptyString(n ast.Expr) bool {
+ lit, ok := n.(*ast.BasicLit)
+ return ok && lit.Kind == token.STRING && len(lit.Value) == 2
+}
+
+func warn(pos token.Pos, msg string, args ...interface{}) {
+ if pos.IsValid() {
+ msg = "%s: " + msg
+ arg1 := []interface{}{fset.Position(pos).String()}
+ args = append(arg1, args...)
+ }
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+}
+
+// countUses returns the number of uses of the identifier x in scope.
+func countUses(x *ast.Ident, scope []ast.Stmt) int {
+ count := 0
+ ff := func(n interface{}) {
+ if n, ok := n.(ast.Node); ok && refersTo(n, x) {
+ count++
+ }
+ }
+ for _, n := range scope {
+ walk(n, ff)
+ }
+ return count
+}
+
+// rewriteUses replaces all uses of the identifier x and !x in scope
+// with f(x.Pos()) and fnot(x.Pos()).
+func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) {
+ var lastF ast.Expr
+ ff := func(n interface{}) {
+ ptr, ok := n.(*ast.Expr)
+ if !ok {
+ return
+ }
+ nn := *ptr
+
+ // The child node was just walked and possibly replaced.
+ // If it was replaced and this is a negation, replace with fnot(p).
+ not, ok := nn.(*ast.UnaryExpr)
+ if ok && not.Op == token.NOT && not.X == lastF {
+ *ptr = fnot(nn.Pos())
+ return
+ }
+ if refersTo(nn, x) {
+ lastF = f(nn.Pos())
+ *ptr = lastF
+ }
+ }
+ for _, n := range scope {
+ walk(n, ff)
+ }
+}
+
+// assignsTo returns true if any of the code in scope assigns to or takes the address of x.
+func assignsTo(x *ast.Ident, scope []ast.Stmt) bool {
+ assigned := false
+ ff := func(n interface{}) {
+ if assigned {
+ return
+ }
+ switch n := n.(type) {
+ case *ast.UnaryExpr:
+ // use of &x
+ if n.Op == token.AND && refersTo(n.X, x) {
+ assigned = true
+ return
+ }
+ case *ast.AssignStmt:
+ for _, l := range n.Lhs {
+ if refersTo(l, x) {
+ assigned = true
+ return
+ }
+ }
+ }
+ }
+ for _, n := range scope {
+ if assigned {
+ break
+ }
+ walk(n, ff)
+ }
+ return assigned
+}
+
+// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos.
+func newPkgDot(pos token.Pos, pkg, name string) ast.Expr {
+ return &ast.SelectorExpr{
+ X: &ast.Ident{
+ NamePos: pos,
+ Name: pkg,
+ },
+ Sel: &ast.Ident{
+ NamePos: pos,
+ Name: name,
+ },
+ }
+}
+
+// renameTop renames all references to the top-level name old.
+// It returns true if it makes any changes.
+func renameTop(f *ast.File, old, new string) bool {
+ var fixed bool
+
+ // Rename any conflicting imports
+ // (assuming package name is last element of path).
+ for _, s := range f.Imports {
+ if s.Name != nil {
+ if s.Name.Name == old {
+ s.Name.Name = new
+ fixed = true
+ }
+ } else {
+ _, thisName := path.Split(importPath(s))
+ if thisName == old {
+ s.Name = ast.NewIdent(new)
+ fixed = true
+ }
+ }
+ }
+
+ // Rename any top-level declarations.
+ for _, d := range f.Decls {
+ switch d := d.(type) {
+ case *ast.FuncDecl:
+ if d.Recv == nil && d.Name.Name == old {
+ d.Name.Name = new
+ d.Name.Obj.Name = new
+ fixed = true
+ }
+ case *ast.GenDecl:
+ for _, s := range d.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if s.Name.Name == old {
+ s.Name.Name = new
+ s.Name.Obj.Name = new
+ fixed = true
+ }
+ case *ast.ValueSpec:
+ for _, n := range s.Names {
+ if n.Name == old {
+ n.Name = new
+ n.Obj.Name = new
+ fixed = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Rename top-level old to new, both unresolved names
+ // (probably defined in another file) and names that resolve
+ // to a declaration we renamed.
+ walk(f, func(n interface{}) {
+ id, ok := n.(*ast.Ident)
+ if ok && isTopName(id, old) {
+ id.Name = new
+ fixed = true
+ }
+ if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new {
+ id.Name = id.Obj.Name
+ fixed = true
+ }
+ })
+
+ return fixed
+}
+
+// matchLen returns the length of the longest prefix shared by x and y.
+func matchLen(x, y string) int {
+ i := 0
+ for i < len(x) && i < len(y) && x[i] == y[i] {
+ i++
+ }
+ return i
+}
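+
+// For illustration (values chosen here, not taken from the upstream source),
+// matchLen is used by addImport below to pick the import block whose paths
+// share the longest prefix with the path being added:
+//
+//    matchLen("go/ast", "go/token") == 3 // shared prefix "go/"
+//    matchLen("fmt", "fmt") == 3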
+
+// addImport adds the import path to the file f, if absent.
+func addImport(f *ast.File, ipath string) (added bool) {
+ if imports(f, ipath) {
+ return false
+ }
+
+ // Determine name of import.
+ // Assume added imports follow convention of using last element.
+ _, name := path.Split(ipath)
+
+ // Rename any conflicting top-level references from name to name_.
+ renameTop(f, name, name+"_")
+
+ newImport := &ast.ImportSpec{
+ Path: &ast.BasicLit{
+ Kind: token.STRING,
+ Value: strconv.Quote(ipath),
+ },
+ }
+
+ // Find an import decl to add to.
+ var (
+ bestMatch = -1
+ lastImport = -1
+ impDecl *ast.GenDecl
+ impIndex = -1
+ )
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if ok && gen.Tok == token.IMPORT {
+ lastImport = i
+ // Do not add to import "C", to avoid disrupting the
+ // association with its doc comment, breaking cgo.
+ if declImports(gen, "C") {
+ continue
+ }
+
+ // Compute longest shared prefix with imports in this block.
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ n := matchLen(importPath(impspec), ipath)
+ if n > bestMatch {
+ bestMatch = n
+ impDecl = gen
+ impIndex = j
+ }
+ }
+ }
+ }
+
+ // If no import decl found, add one after the last import.
+ if impDecl == nil {
+ impDecl = &ast.GenDecl{
+ Tok: token.IMPORT,
+ }
+ f.Decls = append(f.Decls, nil)
+ copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
+ f.Decls[lastImport+1] = impDecl
+ }
+
+ // Ensure the import decl has parentheses, if needed.
+ if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() {
+ impDecl.Lparen = impDecl.Pos()
+ }
+
+ insertAt := impIndex + 1
+ if insertAt == 0 {
+ insertAt = len(impDecl.Specs)
+ }
+ impDecl.Specs = append(impDecl.Specs, nil)
+ copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
+ impDecl.Specs[insertAt] = newImport
+ if insertAt > 0 {
+ // Assign same position as the previous import,
+ // so that the sorter sees it as being in the same block.
+ prev := impDecl.Specs[insertAt-1]
+ newImport.Path.ValuePos = prev.Pos()
+ newImport.EndPos = prev.Pos()
+ }
+
+ f.Imports = append(f.Imports, newImport)
+ return true
+}
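+
+// For illustration (hypothetical call, not made in this file):
+//
+//    addImport(f, "google.golang.org/appengine")
+//
+// adds the import if it is missing, after first renaming any conflicting
+// top-level "appengine" identifiers to "appengine_".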
+
+// deleteImport deletes the import path from the file f, if present.
+func deleteImport(f *ast.File, path string) (deleted bool) {
+ oldImport := importSpec(f, path)
+
+ // Find the import node that imports path, if any.
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok || gen.Tok != token.IMPORT {
+ continue
+ }
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if oldImport != impspec {
+ continue
+ }
+
+ // We found an import spec that imports path.
+ // Delete it.
+ deleted = true
+ copy(gen.Specs[j:], gen.Specs[j+1:])
+ gen.Specs = gen.Specs[:len(gen.Specs)-1]
+
+ // If this was the last import spec in this decl,
+ // delete the decl, too.
+ if len(gen.Specs) == 0 {
+ copy(f.Decls[i:], f.Decls[i+1:])
+ f.Decls = f.Decls[:len(f.Decls)-1]
+ } else if len(gen.Specs) == 1 {
+ gen.Lparen = token.NoPos // drop parens
+ }
+ if j > 0 {
+ // We deleted an entry but now there will be
+ // a blank line-sized hole where the import was.
+ // Close the hole by making the previous
+ // import appear to "end" where this one did.
+ gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End()
+ }
+ break
+ }
+ }
+
+ // Delete it from f.Imports.
+ for i, imp := range f.Imports {
+ if imp == oldImport {
+ copy(f.Imports[i:], f.Imports[i+1:])
+ f.Imports = f.Imports[:len(f.Imports)-1]
+ break
+ }
+ }
+
+ return
+}
+
+// rewriteImport rewrites any import of path oldPath to path newPath.
+func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) {
+ for _, imp := range f.Imports {
+ if importPath(imp) == oldPath {
+ rewrote = true
+ // record old End, because the default is to compute
+ // it using the length of imp.Path.Value.
+ imp.EndPos = imp.End()
+ imp.Path.Value = strconv.Quote(newPath)
+ }
+ }
+ return
+}
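+
+// For illustration (hypothetical paths, not a rewrite performed in this file):
+//
+//    rewriteImport(f, "appengine/datastore", "google.golang.org/appengine/datastore")
+//
+// updates the matching import spec in place and reports whether anything changed.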
+
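+// usesImport reports whether the file f still references the package imported
+// with the given path. Dot and blank imports are conservatively reported as used.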
+func usesImport(f *ast.File, path string) (used bool) {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return
+ }
+
+ name := spec.Name.String()
+ switch name {
+ case "<nil>":
+ // If the package name is not explicitly specified,
+ // make an educated guess. This is not guaranteed to be correct.
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 {
+ name = path
+ } else {
+ name = path[lastSlash+1:]
+ }
+ case "_", ".":
+ // Not sure if this import is used - err on the side of caution.
+ return true
+ }
+
+ walk(f, func(n interface{}) {
+ sel, ok := n.(*ast.SelectorExpr)
+ if ok && isTopName(sel.X, name) {
+ used = true
+ }
+ })
+
+ return
+}
+
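+// expr parses s as a Go expression and strips its position information;
+// it panics if s cannot be parsed.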
+func expr(s string) ast.Expr {
+ x, err := parser.ParseExpr(s)
+ if err != nil {
+ panic("parsing " + s + ": " + err.Error())
+ }
+ // Remove position information to avoid spurious newlines.
+ killPos(reflect.ValueOf(x))
+ return x
+}
+
+var posType = reflect.TypeOf(token.Pos(0))
+
+func killPos(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !v.IsNil() {
+ killPos(v.Elem())
+ }
+ case reflect.Slice:
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ killPos(v.Index(i))
+ }
+ case reflect.Struct:
+ n := v.NumField()
+ for i := 0; i < n; i++ {
+ f := v.Field(i)
+ if f.Type() == posType {
+ f.SetInt(0)
+ continue
+ }
+ killPos(f)
+ }
+ }
+}
+
+// A rename describes a single renaming.
+type rename struct {
+ OldImport string // only apply rename if this import is present
+ NewImport string // add this import during rewrite
+ Old string // old name: p.T or *p.T
+ New string // new name: p.T or *p.T
+}
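+
+// Illustrative example (hypothetical entries, not a table used by this
+// package): renameFix below turns a table like this into a fix function
+// that rewrites *oldpkg.Client to newpkg.Client, adds the "newpkg" import,
+// and drops "oldpkg" once it is no longer used.
+//
+//    var exampleRenames = []rename{{
+//        OldImport: "oldpkg",
+//        NewImport: "newpkg",
+//        Old:       "*oldpkg.Client",
+//        New:       "newpkg.Client",
+//    }}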
+
+func renameFix(tab []rename) func(*ast.File) bool {
+ return func(f *ast.File) bool {
+ return renameFixTab(f, tab)
+ }
+}
+
+func parseName(s string) (ptr bool, pkg, nam string) {
+ i := strings.Index(s, ".")
+ if i < 0 {
+ panic("parseName: invalid name " + s)
+ }
+ if strings.HasPrefix(s, "*") {
+ ptr = true
+ s = s[1:]
+ i--
+ }
+ pkg = s[:i]
+ nam = s[i+1:]
+ return
+}
+
+func renameFixTab(f *ast.File, tab []rename) bool {
+ fixed := false
+ added := map[string]bool{}
+ check := map[string]bool{}
+ for _, t := range tab {
+ if !imports(f, t.OldImport) {
+ continue
+ }
+ optr, opkg, onam := parseName(t.Old)
+ walk(f, func(n interface{}) {
+ np, ok := n.(*ast.Expr)
+ if !ok {
+ return
+ }
+ x := *np
+ if optr {
+ p, ok := x.(*ast.StarExpr)
+ if !ok {
+ return
+ }
+ x = p.X
+ }
+ if !isPkgDot(x, opkg, onam) {
+ return
+ }
+ if t.NewImport != "" && !added[t.NewImport] {
+ addImport(f, t.NewImport)
+ added[t.NewImport] = true
+ }
+ *np = expr(t.New)
+ check[t.OldImport] = true
+ fixed = true
+ })
+ }
+
+ for ipath := range check {
+ if !usesImport(f, ipath) {
+ deleteImport(f, ipath)
+ }
+ }
+ return fixed
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main.go b/vendor/google.golang.org/appengine/cmd/aefix/main.go
new file mode 100644
index 000000000..8e193a6ad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/main.go
@@ -0,0 +1,258 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+var (
+ fset = token.NewFileSet()
+ exitCode = 0
+)
+
+var allowedRewrites = flag.String("r", "",
+ "restrict the rewrites to this comma-separated list")
+
+var forceRewrites = flag.String("force", "",
+ "force these fixes to run even if the code looks updated")
+
+var allowed, force map[string]bool
+
+var doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files")
+
+// enable for debugging fix failures
+const debug = false // display incorrectly reformatted source and exit
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: aefix [-diff] [-r fixname,...] [-force fixname,...] [path ...]\n")
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n")
+ sort.Sort(byName(fixes))
+ for _, f := range fixes {
+ fmt.Fprintf(os.Stderr, "\n%s\n", f.name)
+ desc := strings.TrimSpace(f.desc)
+ desc = strings.Replace(desc, "\n", "\n\t", -1)
+ fmt.Fprintf(os.Stderr, "\t%s\n", desc)
+ }
+ os.Exit(2)
+}
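+
+// Typical invocations (illustrative; available fix names depend on the
+// fixes registered in this package):
+//
+//    aefix -diff ./app        show the rewrites without applying them
+//    aefix -r somefix ./app   apply only the named fix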
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ sort.Sort(byDate(fixes))
+
+ if *allowedRewrites != "" {
+ allowed = make(map[string]bool)
+ for _, f := range strings.Split(*allowedRewrites, ",") {
+ allowed[f] = true
+ }
+ }
+
+ if *forceRewrites != "" {
+ force = make(map[string]bool)
+ for _, f := range strings.Split(*forceRewrites, ",") {
+ force[f] = true
+ }
+ }
+
+ if flag.NArg() == 0 {
+ if err := processFile("standard input", true); err != nil {
+ report(err)
+ }
+ os.Exit(exitCode)
+ }
+
+ for i := 0; i < flag.NArg(); i++ {
+ path := flag.Arg(i)
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, false); err != nil {
+ report(err)
+ }
+ }
+ }
+
+ os.Exit(exitCode)
+}
+
+const parserMode = parser.ParseComments
+
+func gofmtFile(f *ast.File) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, f); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func processFile(filename string, useStdin bool) error {
+ var f *os.File
+ var err error
+ var fixlog bytes.Buffer
+
+ if useStdin {
+ f = os.Stdin
+ } else {
+ f, err = os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ }
+
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parserMode)
+ if err != nil {
+ return err
+ }
+
+ // Apply all fixes to file.
+ newFile := file
+ fixed := false
+ for _, fix := range fixes {
+ if allowed != nil && !allowed[fix.name] {
+ continue
+ }
+ if fix.f(newFile) {
+ fixed = true
+ fmt.Fprintf(&fixlog, " %s", fix.name)
+
+ // AST changed.
+ // Print and parse, to update any missing scoping
+ // or position information for subsequent fixers.
+ newSrc, err := gofmtFile(newFile)
+ if err != nil {
+ return err
+ }
+ newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode)
+ if err != nil {
+ if debug {
+ fmt.Printf("%s", newSrc)
+ report(err)
+ os.Exit(exitCode)
+ }
+ return err
+ }
+ }
+ }
+ if !fixed {
+ return nil
+ }
+ fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:])
+
+ // Print AST. We did that after each fix, so this appears
+ // redundant, but it is necessary to generate gofmt-compatible
+ // source code in a few cases. The official gofmt style is the
+ // output of the printer run on a standard AST generated by the parser,
+ // but the source we generated inside the loop above is the
+ // output of the printer run on a mangled AST generated by a fixer.
+ newSrc, err := gofmtFile(newFile)
+ if err != nil {
+ return err
+ }
+
+ if *doDiff {
+ data, err := diff(src, newSrc)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff %s fixed/%s\n", filename, filename)
+ os.Stdout.Write(data)
+ return nil
+ }
+
+ if useStdin {
+ os.Stdout.Write(newSrc)
+ return nil
+ }
+
+ return ioutil.WriteFile(f.Name(), newSrc, 0)
+}
+
+var gofmtBuf bytes.Buffer
+
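+// gofmt formats the node n as Go source using the shared file set.
+// On error it returns the error text wrapped in angle brackets.
+// It reuses a package-level buffer and is not safe for concurrent use.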
+func gofmt(n interface{}) string {
+ gofmtBuf.Reset()
+ if err := format.Node(&gofmtBuf, fset, n); err != nil {
+ return "<" + err.Error() + ">"
+ }
+ return gofmtBuf.String()
+}
+
+func report(err error) {
+ scanner.PrintError(os.Stderr, err)
+ exitCode = 2
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && isGoFile(f) {
+ err = processFile(path, false)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func isGoFile(f os.FileInfo) bool {
+ // ignore non-Go files
+ name := f.Name()
+ return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
+}
+
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "go-fix")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "go-fix")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main_test.go b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go
new file mode 100644
index 000000000..2151bf29e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go
@@ -0,0 +1,129 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "go/parser"
+ "strings"
+ "testing"
+)
+
+type testCase struct {
+ Name string
+ Fn func(*ast.File) bool
+ In string
+ Out string
+}
+
+var testCases []testCase
+
+func addTestCases(t []testCase, fn func(*ast.File) bool) {
+ // Fill in fn to avoid repetition in definitions.
+ if fn != nil {
+ for i := range t {
+ if t[i].Fn == nil {
+ t[i].Fn = fn
+ }
+ }
+ }
+ testCases = append(testCases, t...)
+}
+
+func fnop(*ast.File) bool { return false }
+
+func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) {
+ file, err := parser.ParseFile(fset, desc, in, parserMode)
+ if err != nil {
+ t.Errorf("%s: parsing: %v", desc, err)
+ return
+ }
+
+ outb, err := gofmtFile(file)
+ if err != nil {
+ t.Errorf("%s: printing: %v", desc, err)
+ return
+ }
+ if s := string(outb); in != s && mustBeGofmt {
+ t.Errorf("%s: not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s",
+ desc, desc, in, desc, s)
+ tdiff(t, in, s)
+ return
+ }
+
+ if fn == nil {
+ for _, fix := range fixes {
+ if fix.f(file) {
+ fixed = true
+ }
+ }
+ } else {
+ fixed = fn(file)
+ }
+
+ outb, err = gofmtFile(file)
+ if err != nil {
+ t.Errorf("%s: printing: %v", desc, err)
+ return
+ }
+
+ return string(outb), fixed, true
+}
+
+func TestRewrite(t *testing.T) {
+ for _, tt := range testCases {
+ // Apply fix: should get tt.Out.
+ out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true)
+ if !ok {
+ continue
+ }
+
+ // reformat to get printing right
+ out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false)
+ if !ok {
+ continue
+ }
+
+ if out != tt.Out {
+ t.Errorf("%s: incorrect output.\n", tt.Name)
+ if !strings.HasPrefix(tt.Name, "testdata/") {
+ t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out)
+ }
+ tdiff(t, out, tt.Out)
+ continue
+ }
+
+ if changed := out != tt.In; changed != fixed {
+ t.Errorf("%s: changed=%v != fixed=%v", tt.Name, changed, fixed)
+ continue
+ }
+
+ // Should not change if run again.
+ out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true)
+ if !ok {
+ continue
+ }
+
+ if fixed2 {
+ t.Errorf("%s: applied fixes during second round", tt.Name)
+ continue
+ }
+
+ if out2 != out {
+ t.Errorf("%s: changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s",
+ tt.Name, out, out2)
+ tdiff(t, out, out2)
+ }
+ }
+}
+
+func tdiff(t *testing.T, a, b string) {
+ data, err := diff([]byte(a), []byte(b))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ t.Error(string(data))
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go
new file mode 100644
index 000000000..d54d37547
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go
@@ -0,0 +1,673 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "os"
+ "reflect"
+ "strings"
+)
+
+// Partial type checker.
+//
+// The fact that it is partial is very important: the input is
+// an AST and a description of some type information to
+// assume about one or more packages, but not all the
+// packages that the program imports. The checker is
+// expected to do as much as it can with what it has been
+// given. There is not enough information supplied to do
+// a full type check, but the type checker is expected to
+// apply information that can be derived from variable
+// declarations, function and method returns, and type switches
+// as far as it can, so that the caller can still tell the types
+// of expressions relevant to a particular fix.
+//
+// TODO(rsc,gri): Replace with go/typechecker.
+// Doing that could be an interesting test case for go/typechecker:
+// the constraints about working with partial information will
+// likely exercise it in interesting ways. The ideal interface would
+// be to pass typecheck a map from importpath to package API text
+// (Go source code), but for now we use data structures (TypeConfig, Type).
+//
+// The strings mostly use gofmt form.
+//
+// A Field or FieldList has as its type a comma-separated list
+// of the types of the fields. For example, the field list
+// x, y, z int
+// has type "int, int, int".
+
+// The prefix "type " is the type of a type.
+// For example, given
+// var x int
+// type T int
+// x's type is "int" but T's type is "type int".
+// mkType inserts the "type " prefix.
+// getType removes it.
+// isType tests for it.
+
+func mkType(t string) string {
+ return "type " + t
+}
+
+func getType(t string) string {
+ if !isType(t) {
+ return ""
+ }
+ return t[len("type "):]
+}
+
+func isType(t string) bool {
+ return strings.HasPrefix(t, "type ")
+}
+
+// TypeConfig describes the universe of relevant types.
+// For ease of creation, the types are all referred to by string
+// name (e.g., "reflect.Value"). TypeByName is the only place
+// where the strings are resolved.
+
+type TypeConfig struct {
+ Type map[string]*Type
+ Var map[string]string
+ Func map[string]string
+}
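+
+// Illustrative example (hypothetical package and names, not a configuration
+// used by aefix): a caller describes the parts of a package's API that the
+// partial checker should know about.
+//
+//    var exampleConfig = &TypeConfig{
+//        Type: map[string]*Type{
+//            "somepkg.Conn": {
+//                Method: map[string]string{"Close": "func() error"},
+//            },
+//        },
+//        Var:  map[string]string{"somepkg.Default": "*somepkg.Conn"},
+//        Func: map[string]string{"somepkg.Dial": "*somepkg.Conn"},
+//    }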
+
+// typeof returns the type of the given name, which may be of
+// the form "x" or "p.X".
+func (cfg *TypeConfig) typeof(name string) string {
+ if cfg.Var != nil {
+ if t := cfg.Var[name]; t != "" {
+ return t
+ }
+ }
+ if cfg.Func != nil {
+ if t := cfg.Func[name]; t != "" {
+ return "func()" + t
+ }
+ }
+ return ""
+}
+
+// Type describes the Fields and Methods of a type.
+// If the field or method cannot be found there, it is next
+// looked for in the Embed list.
+type Type struct {
+ Field map[string]string // map field name to type
+ Method map[string]string // map method name to comma-separated return types (should start with "func ")
+ Embed []string // list of types this type embeds (for extra methods)
+ Def string // definition of named type
+}
+
+// dot returns the type of "typ.name", making its decision
+// using the type information in cfg.
+func (typ *Type) dot(cfg *TypeConfig, name string) string {
+ if typ.Field != nil {
+ if t := typ.Field[name]; t != "" {
+ return t
+ }
+ }
+ if typ.Method != nil {
+ if t := typ.Method[name]; t != "" {
+ return t
+ }
+ }
+
+ for _, e := range typ.Embed {
+ etyp := cfg.Type[e]
+ if etyp != nil {
+ if t := etyp.dot(cfg, name); t != "" {
+ return t
+ }
+ }
+ }
+
+ return ""
+}
+
+// typecheck type checks the AST f assuming the information in cfg.
+// It returns two maps with type information:
+// typeof maps AST nodes to type information in gofmt string form.
+// assign maps each type string to the expressions that were assigned a value
+// of that type despite already having a different recorded type.
+func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) {
+ typeof = make(map[interface{}]string)
+ assign = make(map[string][]interface{})
+ cfg1 := &TypeConfig{}
+ *cfg1 = *cfg // make copy so we can add locally
+ copied := false
+
+ // gather function declarations
+ for _, decl := range f.Decls {
+ fn, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ typecheck1(cfg, fn.Type, typeof, assign)
+ t := typeof[fn.Type]
+ if fn.Recv != nil {
+ // The receiver must be a type.
+ rcvr := typeof[fn.Recv]
+ if !isType(rcvr) {
+ if len(fn.Recv.List) != 1 {
+ continue
+ }
+ rcvr = mkType(gofmt(fn.Recv.List[0].Type))
+ typeof[fn.Recv.List[0].Type] = rcvr
+ }
+ rcvr = getType(rcvr)
+ if rcvr != "" && rcvr[0] == '*' {
+ rcvr = rcvr[1:]
+ }
+ typeof[rcvr+"."+fn.Name.Name] = t
+ } else {
+ if isType(t) {
+ t = getType(t)
+ } else {
+ t = gofmt(fn.Type)
+ }
+ typeof[fn.Name] = t
+
+ // Record typeof[fn.Name.Obj] for future references to fn.Name.
+ typeof[fn.Name.Obj] = t
+ }
+ }
+
+ // gather struct declarations
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if ok {
+ for _, s := range d.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if cfg1.Type[s.Name.Name] != nil {
+ break
+ }
+ if !copied {
+ copied = true
+ // Copy map lazily: it's time.
+ cfg1.Type = make(map[string]*Type)
+ for k, v := range cfg.Type {
+ cfg1.Type[k] = v
+ }
+ }
+ t := &Type{Field: map[string]string{}}
+ cfg1.Type[s.Name.Name] = t
+ switch st := s.Type.(type) {
+ case *ast.StructType:
+ for _, f := range st.Fields.List {
+ for _, n := range f.Names {
+ t.Field[n.Name] = gofmt(f.Type)
+ }
+ }
+ case *ast.ArrayType, *ast.StarExpr, *ast.MapType:
+ t.Def = gofmt(st)
+ }
+ }
+ }
+ }
+ }
+
+ typecheck1(cfg1, f, typeof, assign)
+ return typeof, assign
+}
+
+func makeExprList(a []*ast.Ident) []ast.Expr {
+ var b []ast.Expr
+ for _, x := range a {
+ b = append(b, x)
+ }
+ return b
+}
+
+// typecheck1 is the recursive form of typecheck.
+// It is like typecheck but adds to the information in typeof
+// instead of allocating a new map.
+func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) {
+ // set sets the type of n to typ.
+ // If isDecl is true, n is being declared.
+ set := func(n ast.Expr, typ string, isDecl bool) {
+ if typeof[n] != "" || typ == "" {
+ if typeof[n] != typ {
+ assign[typ] = append(assign[typ], n)
+ }
+ return
+ }
+ typeof[n] = typ
+
+ // If we obtained typ from the declaration of x,
+ // propagate the type to all the uses.
+ // The !isDecl case is a cheat here, but it makes
+ // up in some cases for not paying attention to
+ // struct fields. The real type checker will be
+ // more accurate so we won't need the cheat.
+ if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") {
+ typeof[id.Obj] = typ
+ }
+ }
+
+ // Type-check an assignment lhs = rhs.
+ // If isDecl is true, this is := so we can update
+ // the types of the objects that lhs refers to.
+ typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) {
+ if len(lhs) > 1 && len(rhs) == 1 {
+ if _, ok := rhs[0].(*ast.CallExpr); ok {
+ t := split(typeof[rhs[0]])
+ // Lists should have same length but may not; pair what can be paired.
+ for i := 0; i < len(lhs) && i < len(t); i++ {
+ set(lhs[i], t[i], isDecl)
+ }
+ return
+ }
+ }
+ if len(lhs) == 1 && len(rhs) == 2 {
+ // x = y, ok
+ rhs = rhs[:1]
+ } else if len(lhs) == 2 && len(rhs) == 1 {
+ // x, ok = y
+ lhs = lhs[:1]
+ }
+
+ // Match as much as we can.
+ for i := 0; i < len(lhs) && i < len(rhs); i++ {
+ x, y := lhs[i], rhs[i]
+ if typeof[y] != "" {
+ set(x, typeof[y], isDecl)
+ } else {
+ set(y, typeof[x], false)
+ }
+ }
+ }
+
+ expand := func(s string) string {
+ typ := cfg.Type[s]
+ if typ != nil && typ.Def != "" {
+ return typ.Def
+ }
+ return s
+ }
+
+ // The main type check is a recursive algorithm implemented
+ // by walkBeforeAfter(n, before, after).
+ // Most of it is bottom-up, but in a few places we need
+ // to know the type of the function we are checking.
+ // The before function records that information on
+ // the curfn stack.
+ var curfn []*ast.FuncType
+
+ before := func(n interface{}) {
+ // push function type on stack
+ switch n := n.(type) {
+ case *ast.FuncDecl:
+ curfn = append(curfn, n.Type)
+ case *ast.FuncLit:
+ curfn = append(curfn, n.Type)
+ }
+ }
+
+ // After is the real type checker.
+ after := func(n interface{}) {
+ if n == nil {
+ return
+ }
+ if false && reflect.TypeOf(n).Kind() == reflect.Ptr { // debugging trace
+ defer func() {
+ if t := typeof[n]; t != "" {
+ pos := fset.Position(n.(ast.Node).Pos())
+ fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t)
+ }
+ }()
+ }
+
+ switch n := n.(type) {
+ case *ast.FuncDecl, *ast.FuncLit:
+ // pop function type off stack
+ curfn = curfn[:len(curfn)-1]
+
+ case *ast.FuncType:
+ typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results])))
+
+ case *ast.FieldList:
+ // Field list is concatenation of sub-lists.
+ t := ""
+ for _, field := range n.List {
+ if t != "" {
+ t += ", "
+ }
+ t += typeof[field]
+ }
+ typeof[n] = t
+
+ case *ast.Field:
+ // Field is one instance of the type per name.
+ all := ""
+ t := typeof[n.Type]
+ if !isType(t) {
+ // Create a type, because it is typically *T or *p.T
+ // and we might care about that type.
+ t = mkType(gofmt(n.Type))
+ typeof[n.Type] = t
+ }
+ t = getType(t)
+ if len(n.Names) == 0 {
+ all = t
+ } else {
+ for _, id := range n.Names {
+ if all != "" {
+ all += ", "
+ }
+ all += t
+ typeof[id.Obj] = t
+ typeof[id] = t
+ }
+ }
+ typeof[n] = all
+
+ case *ast.ValueSpec:
+ // var declaration. Use type if present.
+ if n.Type != nil {
+ t := typeof[n.Type]
+ if !isType(t) {
+ t = mkType(gofmt(n.Type))
+ typeof[n.Type] = t
+ }
+ t = getType(t)
+ for _, id := range n.Names {
+ set(id, t, true)
+ }
+ }
+ // Now treat same as assignment.
+ typecheckAssign(makeExprList(n.Names), n.Values, true)
+
+ case *ast.AssignStmt:
+ typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE)
+
+ case *ast.Ident:
+ // Identifier can take its type from underlying object.
+ if t := typeof[n.Obj]; t != "" {
+ typeof[n] = t
+ }
+
+ case *ast.SelectorExpr:
+ // Field or method.
+ name := n.Sel.Name
+ if t := typeof[n.X]; t != "" {
+ if strings.HasPrefix(t, "*") {
+ t = t[1:] // implicit *
+ }
+ if typ := cfg.Type[t]; typ != nil {
+ if t := typ.dot(cfg, name); t != "" {
+ typeof[n] = t
+ return
+ }
+ }
+ tt := typeof[t+"."+name]
+ if isType(tt) {
+ typeof[n] = getType(tt)
+ return
+ }
+ }
+ // Package selector.
+ if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil {
+ str := x.Name + "." + name
+ if cfg.Type[str] != nil {
+ typeof[n] = mkType(str)
+ return
+ }
+ if t := cfg.typeof(x.Name + "." + name); t != "" {
+ typeof[n] = t
+ return
+ }
+ }
+
+ case *ast.CallExpr:
+ // make(T) has type T.
+ if isTopName(n.Fun, "make") && len(n.Args) >= 1 {
+ typeof[n] = gofmt(n.Args[0])
+ return
+ }
+ // new(T) has type *T
+ if isTopName(n.Fun, "new") && len(n.Args) == 1 {
+ typeof[n] = "*" + gofmt(n.Args[0])
+ return
+ }
+ // Otherwise, use type of function to determine arguments.
+ t := typeof[n.Fun]
+ in, out := splitFunc(t)
+ if in == nil && out == nil {
+ return
+ }
+ typeof[n] = join(out)
+ for i, arg := range n.Args {
+ if i >= len(in) {
+ break
+ }
+ if typeof[arg] == "" {
+ typeof[arg] = in[i]
+ }
+ }
+
+ case *ast.TypeAssertExpr:
+ // x.(type) has type of x.
+ if n.Type == nil {
+ typeof[n] = typeof[n.X]
+ return
+ }
+ // x.(T) has type T.
+ if t := typeof[n.Type]; isType(t) {
+ typeof[n] = getType(t)
+ } else {
+ typeof[n] = gofmt(n.Type)
+ }
+
+ case *ast.SliceExpr:
+ // x[i:j] has type of x.
+ typeof[n] = typeof[n.X]
+
+ case *ast.IndexExpr:
+ // x[i] has key type of x's type.
+ t := expand(typeof[n.X])
+ if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") {
+ // Lazy: assume there are no nested [] in the array
+ // length or map key type.
+ if i := strings.Index(t, "]"); i >= 0 {
+ typeof[n] = t[i+1:]
+ }
+ }
+
+ case *ast.StarExpr:
+ // *x for x of type *T has type T when x is an expr.
+ // We don't use the result when *x is a type, but
+ // compute it anyway.
+ t := expand(typeof[n.X])
+ if isType(t) {
+ typeof[n] = "type *" + getType(t)
+ } else if strings.HasPrefix(t, "*") {
+ typeof[n] = t[len("*"):]
+ }
+
+ case *ast.UnaryExpr:
+ // &x for x of type T has type *T.
+ t := typeof[n.X]
+ if t != "" && n.Op == token.AND {
+ typeof[n] = "*" + t
+ }
+
+ case *ast.CompositeLit:
+ // T{...} has type T.
+ typeof[n] = gofmt(n.Type)
+
+ case *ast.ParenExpr:
+ // (x) has type of x.
+ typeof[n] = typeof[n.X]
+
+ case *ast.RangeStmt:
+ t := expand(typeof[n.X])
+ if t == "" {
+ return
+ }
+ var key, value string
+ if t == "string" {
+ key, value = "int", "rune"
+ } else if strings.HasPrefix(t, "[") {
+ key = "int"
+ if i := strings.Index(t, "]"); i >= 0 {
+ value = t[i+1:]
+ }
+ } else if strings.HasPrefix(t, "map[") {
+ if i := strings.Index(t, "]"); i >= 0 {
+ key, value = t[4:i], t[i+1:]
+ }
+ }
+ changed := false
+ if n.Key != nil && key != "" {
+ changed = true
+ set(n.Key, key, n.Tok == token.DEFINE)
+ }
+ if n.Value != nil && value != "" {
+ changed = true
+ set(n.Value, value, n.Tok == token.DEFINE)
+ }
+ // Ugly failure of vision: already type-checked body.
+ // Do it again now that we have that type info.
+ if changed {
+ typecheck1(cfg, n.Body, typeof, assign)
+ }
+
+ case *ast.TypeSwitchStmt:
+ // Type of variable changes for each case in type switch,
+ // but go/parser generates just one variable.
+ // Repeat type check for each case with more precise
+ // type information.
+ as, ok := n.Assign.(*ast.AssignStmt)
+ if !ok {
+ return
+ }
+ varx, ok := as.Lhs[0].(*ast.Ident)
+ if !ok {
+ return
+ }
+ t := typeof[varx]
+ for _, cas := range n.Body.List {
+ cas := cas.(*ast.CaseClause)
+ if len(cas.List) == 1 {
+ // Variable has specific type only when there is
+ // exactly one type in the case list.
+ if tt := typeof[cas.List[0]]; isType(tt) {
+ tt = getType(tt)
+ typeof[varx] = tt
+ typeof[varx.Obj] = tt
+ typecheck1(cfg, cas.Body, typeof, assign)
+ }
+ }
+ }
+ // Restore t.
+ typeof[varx] = t
+ typeof[varx.Obj] = t
+
+ case *ast.ReturnStmt:
+ if len(curfn) == 0 {
+ // Probably can't happen.
+ return
+ }
+ f := curfn[len(curfn)-1]
+ res := n.Results
+ if f.Results != nil {
+ t := split(typeof[f.Results])
+ for i := 0; i < len(res) && i < len(t); i++ {
+ set(res[i], t[i], false)
+ }
+ }
+ }
+ }
+ walkBeforeAfter(f, before, after)
+}
+
+// Convert between function type strings and lists of types.
+// Using strings makes this a little harder, but it makes
+// a lot of the rest of the code easier. This will all go away
+// when we can use go/typechecker directly.
+
+// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"].
+func splitFunc(s string) (in, out []string) {
+ if !strings.HasPrefix(s, "func(") {
+ return nil, nil
+ }
+
+ i := len("func(") // index of beginning of 'in' arguments
+ nparen := 0
+ for j := i; j < len(s); j++ {
+ switch s[j] {
+ case '(':
+ nparen++
+ case ')':
+ nparen--
+ if nparen < 0 {
+ // found end of parameter list
+ out := strings.TrimSpace(s[j+1:])
+ if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' {
+ out = out[1 : len(out)-1]
+ }
+ return split(s[i:j]), split(out)
+ }
+ }
+ }
+ return nil, nil
+}
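+
+// For illustration (not from the upstream source):
+//
+//    splitFunc("func(int, string) (bool, error)")
+//
+// returns in = ["int", "string"] and out = ["bool", "error"].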
+
+// joinFunc is the inverse of splitFunc.
+func joinFunc(in, out []string) string {
+ outs := ""
+ if len(out) == 1 {
+ outs = " " + out[0]
+ } else if len(out) > 1 {
+ outs = " (" + join(out) + ")"
+ }
+ return "func(" + join(in) + ")" + outs
+}
+
+// split splits "int, float" into ["int", "float"] and splits "" into [].
+func split(s string) []string {
+ out := []string{}
+ i := 0 // current type being scanned is s[i:j].
+ nparen := 0
+ for j := 0; j < len(s); j++ {
+ switch s[j] {
+ case ' ':
+ if i == j {
+ i++
+ }
+ case '(':
+ nparen++
+ case ')':
+ nparen--
+ if nparen < 0 {
+ // probably can't happen
+ return nil
+ }
+ case ',':
+ if nparen == 0 {
+ if i < j {
+ out = append(out, s[i:j])
+ }
+ i = j + 1
+ }
+ }
+ }
+ if nparen != 0 {
+ // probably can't happen
+ return nil
+ }
+ if i < len(s) {
+ out = append(out, s[i:])
+ }
+ return out
+}
+
+// join is the inverse of split.
+func join(x []string) string {
+ return strings.Join(x, ", ")
+}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 000000000..576bc5013
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,407 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ // ErrInvalidEntityType is returned when functions like Get or Next are
+ // passed a dst or src argument of invalid type.
+ ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+ // ErrInvalidKey is returned when an invalid key is presented.
+ ErrInvalidKey = errors.New("datastore: invalid key")
+ // ErrNoSuchEntity is returned when no entity was found for a given key.
+ ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+// protoToKey converts a Reference proto to a *Key. If the key is invalid,
+// protoToKey will return the invalid key along with ErrInvalidKey.
+func protoToKey(r *pb.Reference) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Path.Element {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return k, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToProto converts a *Key to a Reference proto.
+func keyToProto(defaultAppID string, k *Key) *pb.Reference {
+ appID := k.appID
+ if appID == "" {
+ appID = defaultAppID
+ }
+ n := 0
+ for i := k; i != nil; i = i.parent {
+ n++
+ }
+ e := make([]*pb.Path_Element, n)
+ for i := k; i != nil; i = i.parent {
+ n--
+ e[n] = &pb.Path_Element{
+ Type: &i.kind,
+ }
+ // At most one of {Name,Id} should be set.
+ // Neither will be set for incomplete keys.
+ if i.stringID != "" {
+ e[n].Name = &i.stringID
+ } else if i.intID != 0 {
+ e[n].Id = &i.intID
+ }
+ }
+ var namespace *string
+ if k.namespace != "" {
+ namespace = proto.String(k.namespace)
+ }
+ return &pb.Reference{
+ App: proto.String(appID),
+ NameSpace: namespace,
+ Path: &pb.Path{
+ Element: e,
+ },
+ }
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
+ ret := make([]*pb.Reference, len(key))
+ for i, k := range key {
+ ret[i] = keyToProto(appID, k)
+ }
+ return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+ invalid := false
+ for _, k := range key {
+ if !k.valid() {
+ invalid = true
+ break
+ }
+ }
+ if !invalid {
+ return nil
+ }
+ err := make(appengine.MultiError, len(key))
+ for i, k := range key {
+ if !k.valid() {
+ err[i] = ErrInvalidKey
+ }
+ }
+ return err
+}
+
+// It's unfortunate that the two semantically equivalent concepts pb.Reference
+// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
+// two have different protobuf field numbers.
+
+// referenceValueToKey is the same as protoToKey except the input is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Pathelement {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToReferenceValue is the same as keyToProto except the output is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
+ ref := keyToProto(defaultAppID, k)
+ pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
+ for i, e := range ref.Path.Element {
+ pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
+ Type: e.Type,
+ Id: e.Id,
+ Name: e.Name,
+ }
+ }
+ return &pb.PropertyValue_ReferenceValue{
+ App: ref.App,
+ NameSpace: ref.NameSpace,
+ Pathelement: pe,
+ }
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypePropertyLoadSaver
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+ multiArgTypeInterface
+)
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ if v.Type() == typeOfPropertyList {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+ return multiArgTypePropertyLoadSaver, elemType
+ }
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Interface:
+ return multiArgTypeInterface, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+// Get loads the entity stored for k into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func Get(c context.Context, key *Key, dst interface{}) error {
+ if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
+ return ErrInvalidEntityType
+ }
+ err := GetMulti(c, []*Key{key}, []interface{}{dst})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
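+
+// Illustrative usage (hypothetical Employee type and client code, not part of
+// this package):
+//
+//    type Employee struct {
+//        Name     string
+//        HireDate time.Time
+//    }
+//
+//    func getEmployee(ctx context.Context, k *datastore.Key) (*Employee, error) {
+//        e := new(Employee)
+//        if err := datastore.Get(ctx, k, e); err != nil {
+//            return nil, err
+//        }
+//        return e, nil
+//    }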
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func GetMulti(c context.Context, key []*Key, dst interface{}) error {
+ v := reflect.ValueOf(dst)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return errors.New("datastore: dst has invalid type")
+ }
+ if len(key) != v.Len() {
+ return errors.New("datastore: key and dst slices have different length")
+ }
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.GetRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.GetResponse{}
+ if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
+ return err
+ }
+ if len(key) != len(res.Entity) {
+ return errors.New("datastore: internal error: server returned the wrong number of entities")
+ }
+ multiErr, any := make(appengine.MultiError, len(key)), false
+ for i, e := range res.Entity {
+ if e.Entity == nil {
+ multiErr[i] = ErrNoSuchEntity
+ } else {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+ elem.Set(reflect.New(elem.Type().Elem()))
+ }
+ multiErr[i] = loadEntity(elem.Interface(), e.Entity)
+ }
+ if multiErr[i] != nil {
+ any = true
+ }
+ }
+ if any {
+ return multiErr
+ }
+ return nil
+}
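+
+// Illustrative usage (hypothetical keys and Employee type, not part of this
+// package):
+//
+//    keys := []*datastore.Key{k1, k2}
+//    dst := make([]Employee, len(keys))
+//    if err := datastore.GetMulti(ctx, keys, dst); err != nil {
+//        if me, ok := err.(appengine.MultiError); ok {
+//            // me[i] holds the error, if any, for keys[i].
+//        }
+//    }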
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
+func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
+ k, err := PutMulti(c, []*Key{key}, []interface{}{src})
+ if err != nil {
+ if me, ok := err.(appengine.MultiError); ok {
+ return nil, me[0]
+ }
+ return nil, err
+ }
+ return k[0], nil
+}
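+
+// Illustrative usage (hypothetical Employee type, not part of this package):
+//
+//    k := datastore.NewIncompleteKey(ctx, "Employee", nil)
+//    key, err := datastore.Put(ctx, k, &Employee{Name: "Ada"})
+//    // key is the complete key assigned by the datastore.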
+
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
+ v := reflect.ValueOf(src)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return nil, errors.New("datastore: src has invalid type")
+ }
+ if len(key) != v.Len() {
+ return nil, errors.New("datastore: key and src slices have different length")
+ }
+ if len(key) == 0 {
+ return nil, nil
+ }
+ appID := internal.FullyQualifiedAppID(c)
+ if err := multiValid(key); err != nil {
+ return nil, err
+ }
+ req := &pb.PutRequest{}
+ for i := range key {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ sProto, err := saveEntity(appID, key[i], elem.Interface())
+ if err != nil {
+ return nil, err
+ }
+ req.Entity = append(req.Entity, sProto)
+ }
+ res := &pb.PutResponse{}
+ if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
+ return nil, err
+ }
+ if len(key) != len(res.Key) {
+ return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+ }
+ ret := make([]*Key, len(key))
+ for i := range ret {
+ var err error
+ ret[i], err = protoToKey(res.Key[i])
+ if err != nil || ret[i].Incomplete() {
+ return nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ }
+ return ret, nil
+}
+
+// Delete deletes the entity for the given key.
+func Delete(c context.Context, key *Key) error {
+ err := DeleteMulti(c, []*Key{key})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti is a batch version of Delete.
+func DeleteMulti(c context.Context, key []*Key) error {
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.DeleteRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.DeleteResponse{}
+ return internal.Call(c, "datastore_v3", "Delete", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ // pb.Query is the only type that has a name_space field.
+ // All other namespace support in datastore is in the keys.
+ switch m := m.(type) {
+ case *pb.Query:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ }
+}
+
+func init() {
+ internal.NamespaceMods["datastore_v3"] = namespaceMod
+ internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
+}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore_test.go b/vendor/google.golang.org/appengine/datastore/datastore_test.go
new file mode 100644
index 000000000..b3888e9d1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore_test.go
@@ -0,0 +1,1744 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+const testAppID = "testApp"
+
+type (
+ myBlob []byte
+ myByte byte
+ myString string
+)
+
+func makeMyByteSlice(n int) []myByte {
+ b := make([]myByte, n)
+ for i := range b {
+ b[i] = myByte(i)
+ }
+ return b
+}
+
+func makeInt8Slice(n int) []int8 {
+ b := make([]int8, n)
+ for i := range b {
+ b[i] = int8(i)
+ }
+ return b
+}
+
+func makeUint8Slice(n int) []uint8 {
+ b := make([]uint8, n)
+ for i := range b {
+ b[i] = uint8(i)
+ }
+ return b
+}
+
+func newKey(stringID string, parent *Key) *Key {
+ return &Key{
+ kind: "kind",
+ stringID: stringID,
+ intID: 0,
+ parent: parent,
+ appID: testAppID,
+ }
+}
+
+var (
+ testKey0 = newKey("name0", nil)
+ testKey1a = newKey("name1", nil)
+ testKey1b = newKey("name1", nil)
+ testKey2a = newKey("name2", testKey0)
+ testKey2b = newKey("name2", testKey0)
+ testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4}
+ testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10}
+ testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34}
+
+ now = time.Unix(1e9, 0).UTC()
+)
+
+type B0 struct {
+ B []byte
+}
+
+type B1 struct {
+ B []int8
+}
+
+type B2 struct {
+ B myBlob
+}
+
+type B3 struct {
+ B []myByte
+}
+
+type B4 struct {
+ B [][]byte
+}
+
+type B5 struct {
+ B ByteString
+}
+
+type C0 struct {
+ I int
+ C chan int
+}
+
+type C1 struct {
+ I int
+ C *chan int
+}
+
+type C2 struct {
+ I int
+ C []chan int
+}
+
+type C3 struct {
+ C string
+}
+
+type E struct{}
+
+type G0 struct {
+ G appengine.GeoPoint
+}
+
+type G1 struct {
+ G []appengine.GeoPoint
+}
+
+type K0 struct {
+ K *Key
+}
+
+type K1 struct {
+ K []*Key
+}
+
+type S struct {
+ St string
+}
+
+type NoOmit struct {
+ A string
+ B int `datastore:"Bb"`
+ C bool `datastore:",noindex"`
+}
+
+type OmitAll struct {
+ A string `datastore:",omitempty"`
+ B int `datastore:"Bb,omitempty"`
+ C bool `datastore:",omitempty,noindex"`
+ F []int `datastore:",omitempty"`
+}
+
+type Omit struct {
+ A string `datastore:",omitempty"`
+ B int `datastore:"Bb,omitempty"`
+ C bool `datastore:",omitempty,noindex"`
+ F []int `datastore:",omitempty"`
+ S `datastore:",omitempty"`
+}
+
+type NoOmits struct {
+ No []NoOmit `datastore:",omitempty"`
+ S `datastore:",omitempty"`
+ Ss S `datastore:",omitempty"`
+}
+
+type N0 struct {
+ X0
+ Nonymous X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N1 struct {
+ X0
+ Nonymous []X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N2 struct {
+ N1 `datastore:"red"`
+ Green N1 `datastore:"green"`
+ Blue N1
+ White N1 `datastore:"-"`
+}
+
+type O0 struct {
+ I int64
+}
+
+type O1 struct {
+ I int32
+}
+
+type U0 struct {
+ U uint
+}
+
+type U1 struct {
+ U string
+}
+
+type T struct {
+ T time.Time
+}
+
+type X0 struct {
+ S string
+ I int
+ i int
+}
+
+type X1 struct {
+ S myString
+ I int32
+ J int64
+}
+
+type X2 struct {
+ Z string
+ i int
+}
+
+type X3 struct {
+ S bool
+ I int
+}
+
+type Y0 struct {
+ B bool
+ F []float64
+ G []float64
+}
+
+type Y1 struct {
+ B bool
+ F float64
+}
+
+type Y2 struct {
+ B bool
+ F []int64
+}
+
+type Tagged struct {
+ A int `datastore:"a,noindex"`
+ B []int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ // The "flatten" option is parsed but ignored for now.
+ F int `datastore:",noindex,flatten"`
+ G int `datastore:",flatten"`
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+
+ Y0 `datastore:"-"`
+ Z chan int `datastore:"-,"`
+}
+
+type InvalidTagged1 struct {
+ I int `datastore:"\t"`
+}
+
+type InvalidTagged2 struct {
+ I int
+ J int `datastore:"I"`
+}
+
+type Inner1 struct {
+ W int32
+ X string
+}
+
+type Inner2 struct {
+ Y float64
+}
+
+type Inner3 struct {
+ Z bool
+}
+
+type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+}
+
+type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+}
+
+type Dotted struct {
+ A DottedA `datastore:"A0.A1.A2"`
+}
+
+type DottedA struct {
+ B DottedB `datastore:"B3"`
+}
+
+type DottedB struct {
+ C int `datastore:"C4.C5"`
+}
+
+type SliceOfSlices struct {
+ I int
+ S []struct {
+ J int
+ F []float64
+ }
+}
+
+type Recursive struct {
+ I int
+ R []Recursive
+}
+
+type MutuallyRecursive0 struct {
+ I int
+ R []MutuallyRecursive1
+}
+
+type MutuallyRecursive1 struct {
+ I int
+ R []MutuallyRecursive0
+}
+
+type Doubler struct {
+ S string
+ I int64
+ B bool
+}
+
+type Repeat struct {
+ Key string
+ Value []byte
+}
+
+type Repeated struct {
+ Repeats []Repeat
+}
+
+func (d *Doubler) Load(props []Property) error {
+ return LoadStruct(d, props)
+}
+
+type EmbeddedTime struct {
+ time.Time
+}
+
+type SpecialTime struct {
+ MyTime EmbeddedTime
+}
+
+func (d *Doubler) Save() ([]Property, error) {
+ // Save the default Property slice to an in-memory buffer (a PropertyList).
+ props, err := SaveStruct(d)
+ if err != nil {
+ return nil, err
+ }
+ var list PropertyList
+ if err := list.Load(props); err != nil {
+ return nil, err
+ }
+
+ // Edit that PropertyList, and send it on.
+ for i := range list {
+ switch v := list[i].Value.(type) {
+ case string:
+ // + means string concatenation.
+ list[i].Value = v + v
+ case int64:
+ // + means integer addition.
+ list[i].Value = v + v
+ }
+ }
+ return list.Save()
+}
+
+var _ PropertyLoadSaver = (*Doubler)(nil)
+
+type Deriver struct {
+ S, Derived, Ignored string
+}
+
+func (e *Deriver) Load(props []Property) error {
+ for _, p := range props {
+ if p.Name != "S" {
+ continue
+ }
+ e.S = p.Value.(string)
+ e.Derived = "derived+" + e.S
+ }
+ return nil
+}
+
+func (e *Deriver) Save() ([]Property, error) {
+ return []Property{
+ {
+ Name: "S",
+ Value: e.S,
+ },
+ }, nil
+}
+
+var _ PropertyLoadSaver = (*Deriver)(nil)
+
+type BadMultiPropEntity struct{}
+
+func (e *BadMultiPropEntity) Load(props []Property) error {
+ return errors.New("unimplemented")
+}
+
+func (e *BadMultiPropEntity) Save() ([]Property, error) {
+ // Write multiple properties with the same name "I", but Multiple is false.
+ var props []Property
+ for i := 0; i < 3; i++ {
+ props = append(props, Property{
+ Name: "I",
+ Value: int64(i),
+ })
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil)
+
+type BK struct {
+ Key appengine.BlobKey
+}
+
+type testCase struct {
+ desc string
+ src interface{}
+ want interface{}
+ putErr string
+ getErr string
+}
+
+var testCases = []testCase{
+ {
+ "chan save fails",
+ &C0{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "*chan save fails",
+ &C1{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "[]chan save fails",
+ &C2{I: -1, C: make([]chan int, 8)},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "chan load fails",
+ &C3{C: "not a chan"},
+ &C0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "*chan load fails",
+ &C3{C: "not a *chan"},
+ &C1{},
+ "",
+ "type mismatch",
+ },
+ {
+ "[]chan load fails",
+ &C3{C: "not a []chan"},
+ &C2{},
+ "",
+ "type mismatch",
+ },
+ {
+ "empty struct",
+ &E{},
+ &E{},
+ "",
+ "",
+ },
+ {
+ "geopoint",
+ &G0{G: testGeoPt0},
+ &G0{G: testGeoPt0},
+ "",
+ "",
+ },
+ {
+ "geopoint invalid",
+ &G0{G: testBadGeoPt},
+ &G0{},
+ "invalid GeoPoint value",
+ "",
+ },
+ {
+ "geopoint as props",
+ &G0{G: testGeoPt0},
+ &PropertyList{
+ Property{Name: "G", Value: testGeoPt0, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "geopoint slice",
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ "",
+ "",
+ },
+ {
+ "omit empty, all",
+ &OmitAll{},
+ new(PropertyList),
+ "",
+ "",
+ },
+ {
+ "omit empty",
+ &Omit{},
+ &PropertyList{
+ Property{Name: "St", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "omit empty, fields populated",
+ &Omit{
+ A: "a",
+ B: 10,
+ C: true,
+ F: []int{11},
+ },
+ &PropertyList{
+ Property{Name: "A", Value: "a", NoIndex: false, Multiple: false},
+ Property{Name: "Bb", Value: int64(10), NoIndex: false, Multiple: false},
+ Property{Name: "C", Value: true, NoIndex: true, Multiple: false},
+ Property{Name: "F", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "St", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "omit empty, fields populated",
+ &Omit{
+ A: "a",
+ B: 10,
+ C: true,
+ F: []int{11},
+ S: S{St: "string"},
+ },
+ &PropertyList{
+ Property{Name: "A", Value: "a", NoIndex: false, Multiple: false},
+ Property{Name: "Bb", Value: int64(10), NoIndex: false, Multiple: false},
+ Property{Name: "C", Value: true, NoIndex: true, Multiple: false},
+ Property{Name: "F", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "St", Value: "string", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "omit empty does not propagate",
+ &NoOmits{
+ No: []NoOmit{
+ NoOmit{},
+ },
+ S: S{},
+ Ss: S{},
+ },
+ &PropertyList{
+ Property{Name: "No.A", Value: "", NoIndex: false, Multiple: true},
+ Property{Name: "No.Bb", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "No.C", Value: false, NoIndex: true, Multiple: true},
+ Property{Name: "Ss.St", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "St", Value: "", NoIndex: false, Multiple: false}},
+ "",
+ "",
+ },
+ {
+ "key",
+ &K0{K: testKey1a},
+ &K0{K: testKey1b},
+ "",
+ "",
+ },
+ {
+ "key with parent",
+ &K0{K: testKey2a},
+ &K0{K: testKey2b},
+ "",
+ "",
+ },
+ {
+ "nil key",
+ &K0{},
+ &K0{},
+ "",
+ "",
+ },
+ {
+ "all nil keys in slice",
+ &K1{[]*Key{nil, nil}},
+ &K1{[]*Key{nil, nil}},
+ "",
+ "",
+ },
+ {
+ "some nil keys in slice",
+ &K1{[]*Key{testKey1a, nil, testKey2a}},
+ &K1{[]*Key{testKey1b, nil, testKey2b}},
+ "",
+ "",
+ },
+ {
+ "overflow",
+ &O0{I: 1 << 48},
+ &O1{},
+ "",
+ "overflow",
+ },
+ {
+ "time",
+ &T{T: time.Unix(1e9, 0)},
+ &T{T: time.Unix(1e9, 0)},
+ "",
+ "",
+ },
+ {
+ "time as props",
+ &T{T: time.Unix(1e9, 0)},
+ &PropertyList{
+ Property{Name: "T", Value: time.Unix(1e9, 0).UTC(), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "uint save",
+ &U0{U: 1},
+ &U0{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "uint load",
+ &U1{U: "not a uint"},
+ &U0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "zero",
+ &X0{},
+ &X0{},
+ "",
+ "",
+ },
+ {
+ "basic",
+ &X0{S: "one", I: 2, i: 3},
+ &X0{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "save string/int load myString/int32",
+ &X0{S: "one", I: 2, i: 3},
+ &X1{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "missing fields",
+ &X0{S: "one", I: 2, i: 3},
+ &X2{},
+ "",
+ "no such struct field",
+ },
+ {
+ "save string load bool",
+ &X0{S: "one", I: 2, i: 3},
+ &X3{I: 2},
+ "",
+ "type mismatch",
+ },
+ {
+ "basic slice",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ "",
+ "",
+ },
+ {
+ "save []float64 load float64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y1{B: true},
+ "",
+ "requires a slice",
+ },
+ {
+ "save []float64 load []int64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y2{B: true},
+ "",
+ "type mismatch",
+ },
+ {
+ "single slice is too long",
+ &Y0{F: make([]float64, maxIndexedProperties+1)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "two slices are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "one slice and one scalar are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), B: true},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "slice of slices of bytes",
+ &Repeated{
+ Repeats: []Repeat{
+ {
+ Key: "key 1",
+ Value: []byte("value 1"),
+ },
+ {
+ Key: "key 2",
+ Value: []byte("value 2"),
+ },
+ },
+ },
+ &Repeated{
+ Repeats: []Repeat{
+ {
+ Key: "key 1",
+ Value: []byte("value 1"),
+ },
+ {
+ Key: "key 2",
+ Value: []byte("value 2"),
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "long blob",
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "long []int8 is too long",
+ &B1{B: makeInt8Slice(maxIndexedProperties + 1)},
+ &B1{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "short []int8",
+ &B1{B: makeInt8Slice(3)},
+ &B1{B: makeInt8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long myBlob",
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short myBlob",
+ &B2{B: makeUint8Slice(3)},
+ &B2{B: makeUint8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long []myByte",
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short []myByte",
+ &B3{B: makeMyByteSlice(3)},
+ &B3{B: makeMyByteSlice(3)},
+ "",
+ "",
+ },
+ {
+ "slice of blobs",
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ "",
+ "",
+ },
+ {
+ "short ByteString",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &B5{B: ByteString(makeUint8Slice(3))},
+ "",
+ "",
+ },
+ {
+ "short ByteString as props",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &PropertyList{
+ Property{Name: "B", Value: ByteString(makeUint8Slice(3)), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "short ByteString into string",
+ &B5{B: ByteString("legacy")},
+ &struct{ B string }{"legacy"},
+ "",
+ "",
+ },
+ {
+ "[]byte must be noindex",
+ &PropertyList{
+ Property{Name: "B", Value: makeUint8Slice(3), NoIndex: false},
+ },
+ nil,
+ "cannot index a []byte valued Property",
+ "",
+ },
+ {
+ "save tagged load props",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, F: 6, G: 7, I: 8, J: 9},
+ &PropertyList{
+			// A and B are renamed to a and b; A, C, F and J are noindex; I is ignored.
+			// Indexed properties are loaded before raw properties, but the
+			// expected list below is in name-sorted order, matching the sort
+			// applied by TestRoundTrip before comparison.
+ Property{Name: "C", Value: int64(3), NoIndex: true, Multiple: false},
+ Property{Name: "D", Value: int64(4), NoIndex: false, Multiple: false},
+ Property{Name: "E", Value: int64(5), NoIndex: false, Multiple: false},
+ Property{Name: "F", Value: int64(6), NoIndex: true, Multiple: false},
+ Property{Name: "G", Value: int64(7), NoIndex: false, Multiple: false},
+ Property{Name: "J", Value: int64(9), NoIndex: true, Multiple: false},
+ Property{Name: "a", Value: int64(1), NoIndex: true, Multiple: false},
+ Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true},
+ },
+ "",
+ "",
+ },
+ {
+ "save tagged load tagged",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7},
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7},
+ "",
+ "",
+ },
+ {
+ "save props load tagged",
+ &PropertyList{
+ Property{Name: "A", Value: int64(11), NoIndex: true, Multiple: false},
+ Property{Name: "a", Value: int64(12), NoIndex: true, Multiple: false},
+ },
+ &Tagged{A: 12},
+ "",
+ `cannot load field "A"`,
+ },
+ {
+ "invalid tagged1",
+ &InvalidTagged1{I: 1},
+ &InvalidTagged1{},
+ "struct tag has invalid property name",
+ "",
+ },
+ {
+ "invalid tagged2",
+ &InvalidTagged2{I: 1, J: 2},
+ &InvalidTagged2{},
+ "struct tag has repeated property name",
+ "",
+ },
+ {
+ "doubler",
+ &Doubler{S: "s", I: 1, B: true},
+ &Doubler{S: "ss", I: 2, B: true},
+ "",
+ "",
+ },
+ {
+ "save struct load props",
+ &X0{S: "s", I: 1},
+ &PropertyList{
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load struct",
+ &PropertyList{
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ },
+ &X0{S: "s", I: 1},
+ "",
+ "",
+ },
+ {
+ "nil-value props",
+ &PropertyList{
+ Property{Name: "I", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "B", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "S", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "F", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "K", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "T", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: int64(7), NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ },
+ &struct {
+ I int64
+ B bool
+ S string
+ F float64
+ K *Key
+ T time.Time
+ J []int64
+ }{
+ J: []int64{0, 7, 0},
+ },
+ "",
+ "",
+ },
+ {
+ "save outer load props",
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load outer-equivalent",
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ "",
+ "",
+ },
+ {
+ "save outer-equivalent load outer",
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names save",
+ &Dotted{A: DottedA{B: DottedB{C: 88}}},
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names load",
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false, Multiple: false},
+ },
+ &Dotted{A: DottedA{B: DottedB{C: 99}}},
+ "",
+ "",
+ },
+ {
+ "save struct load deriver",
+ &X0{S: "s", I: 1},
+ &Deriver{S: "s", Derived: "derived+s"},
+ "",
+ "",
+ },
+ {
+ "save deriver load struct",
+ &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"},
+ &X0{S: "s"},
+ "",
+ "",
+ },
+ {
+ "bad multi-prop entity",
+ &BadMultiPropEntity{},
+ &BadMultiPropEntity{},
+ "Multiple is false",
+ "",
+ },
+ // Regression: CL 25062824 broke handling of appengine.BlobKey fields.
+ {
+ "appengine.BlobKey",
+ &BK{Key: "blah"},
+ &BK{Key: "blah"},
+ "",
+ "",
+ },
+ {
+ "zero time.Time",
+ &T{T: time.Time{}},
+ &T{T: time.Time{}},
+ "",
+ "",
+ },
+ {
+ "time.Time near Unix zero time",
+ &T{T: time.Unix(0, 4e3)},
+ &T{T: time.Unix(0, 4e3)},
+ "",
+ "",
+ },
+ {
+ "time.Time, far in the future",
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ "",
+ "",
+ },
+ {
+ "time.Time, very far in the past",
+ &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "time.Time, very far in the future",
+ &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "structs",
+ &N0{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: X0{S: "four", I: 5, i: 6},
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N0{
+ X0: X0{S: "one", I: 2},
+ Nonymous: X0{S: "four", I: 5},
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "slice of structs",
+ &N1{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: []X0{
+ {S: "four", I: 5, i: 6},
+ {S: "seven", I: 8, i: 9},
+ {S: "ten", I: 11, i: 12},
+ {S: "thirteen", I: 14, i: 15},
+ },
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N1{
+ X0: X0{S: "one", I: 2},
+ Nonymous: []X0{
+ {S: "four", I: 5},
+ {S: "seven", I: 8},
+ {S: "ten", I: 11},
+ {S: "thirteen", I: 14},
+ },
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "structs with slices of structs",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs load props",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &PropertyList{
+ Property{Name: "Blue.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "Blue.S", Value: "bleu", NoIndex: false, Multiple: false},
+ Property{Name: "green.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true},
+ Property{Name: "green.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "green.S", Value: "vert", NoIndex: false, Multiple: false},
+ Property{Name: "red.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true},
+ Property{Name: "red.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "red.S", Value: "rouge", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load structs with ragged fields",
+ &PropertyList{
+ Property{Name: "red.S", Value: "rot", NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false, Multiple: true},
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rot"},
+ },
+ Green: N1{
+ Nonymous: []X0{
+ {I: 10},
+ {I: 11},
+ {I: 12},
+ {I: 13},
+ },
+ },
+ Blue: N1{
+ Nonymous: []X0{
+ {S: "blau0", I: 20},
+ {S: "blau1", I: 21},
+ {S: "blau2"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs with noindex tags",
+ &struct {
+ A struct {
+ X string `datastore:",noindex"`
+ Y string
+ } `datastore:",noindex"`
+ B struct {
+ X string `datastore:",noindex"`
+ Y string
+ }
+ }{},
+ &PropertyList{
+ Property{Name: "A.X", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "A.Y", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "B.X", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "B.Y", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "embedded struct with name override",
+ &struct {
+ Inner1 `datastore:"foo"`
+ }{},
+ &PropertyList{
+ Property{Name: "foo.W", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "foo.X", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "slice of slices",
+ &SliceOfSlices{},
+ nil,
+ "flattening nested structs leads to a slice of slices",
+ "",
+ },
+ {
+ "recursive struct",
+ &Recursive{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "mutually recursive struct",
+ &MutuallyRecursive0{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "non-exported struct fields",
+ &struct {
+ i, J int64
+ }{i: 1, J: 2},
+ &PropertyList{
+ Property{Name: "J", Value: int64(2), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage",
+ &struct {
+ J json.RawMessage
+ }{
+ J: json.RawMessage("rawr"),
+ },
+ &PropertyList{
+ Property{Name: "J", Value: []byte("rawr"), NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage to myBlob",
+ &struct {
+ B json.RawMessage
+ }{
+ B: json.RawMessage("rawr"),
+ },
+ &B2{B: myBlob("rawr")},
+ "",
+ "",
+ },
+ {
+ "embedded time field",
+ &SpecialTime{MyTime: EmbeddedTime{now}},
+ &SpecialTime{MyTime: EmbeddedTime{now}},
+ "",
+ "",
+ },
+ {
+ "embedded time load",
+ &PropertyList{
+ Property{Name: "MyTime.", Value: now, NoIndex: false, Multiple: false},
+ },
+ &SpecialTime{MyTime: EmbeddedTime{now}},
+ "",
+ "",
+ },
+}
+
+// checkErr returns the empty string if want and err agree: that is, if both
+// are zero (want == "" and err == nil), or if want is a non-empty substring
+// of err's string representation. Otherwise it returns a description of the
+// mismatch.
+func checkErr(want string, err error) string {
+ if err != nil {
+ got := err.Error()
+		if want == "" || !strings.Contains(got, want) {
+ return got
+ }
+ } else if want != "" {
+ return fmt.Sprintf("want error %q", want)
+ }
+ return ""
+}
+
+func TestRoundTrip(t *testing.T) {
+ for _, tc := range testCases {
+ p, err := saveEntity(testAppID, testKey0, tc.src)
+ if s := checkErr(tc.putErr, err); s != "" {
+ t.Errorf("%s: save: %s", tc.desc, s)
+ continue
+ }
+ if p == nil {
+ continue
+ }
+ var got interface{}
+ if _, ok := tc.want.(*PropertyList); ok {
+ got = new(PropertyList)
+ } else {
+ got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ }
+ err = loadEntity(got, p)
+ if s := checkErr(tc.getErr, err); s != "" {
+ t.Errorf("%s: load: %s", tc.desc, s)
+ continue
+ }
+ if pl, ok := got.(*PropertyList); ok {
+ // Sort by name to make sure we have a deterministic order.
+ sort.Stable(byName(*pl))
+ }
+ equal := false
+ if gotT, ok := got.(*T); ok {
+ // Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
+ // We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
+ equal = gotT.T.Equal(tc.want.(*T).T)
+ } else {
+ equal = reflect.DeepEqual(got, tc.want)
+ }
+ if !equal {
+ t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+type byName PropertyList
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func TestQueryConstruction(t *testing.T) {
+ tests := []struct {
+ q, exp *Query
+ err string
+ }{
+ {
+ q: NewQuery("Foo"),
+ exp: &Query{
+ kind: "Foo",
+ limit: -1,
+ },
+ },
+ {
+ // Regular filtered query with standard spacing.
+ q: NewQuery("Foo").Filter("foo >", 7),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: greaterThan,
+ Value: 7,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with no spacing.
+ q: NewQuery("Foo").Filter("foo=", 6),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: equal,
+ Value: 6,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with funky spacing.
+ q: NewQuery("Foo").Filter(" foo< ", 8),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: lessThan,
+ Value: 8,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with multicharacter op.
+ q: NewQuery("Foo").Filter("foo >=", 9),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: greaterEq,
+ Value: 9,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with ordering.
+ q: NewQuery("Foo").Order("bar"),
+ exp: &Query{
+ kind: "Foo",
+ order: []order{
+ {
+ FieldName: "bar",
+ Direction: ascending,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with reverse ordering, and funky spacing.
+ q: NewQuery("Foo").Order(" - bar"),
+ exp: &Query{
+ kind: "Foo",
+ order: []order{
+ {
+ FieldName: "bar",
+ Direction: descending,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with an empty ordering.
+ q: NewQuery("Foo").Order(""),
+ err: "empty order",
+ },
+ {
+ // Query with a + ordering.
+ q: NewQuery("Foo").Order("+bar"),
+ err: "invalid order",
+ },
+ }
+ for i, test := range tests {
+ if test.q.err != nil {
+ got := test.q.err.Error()
+ if !strings.Contains(got, test.err) {
+ t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err)
+ }
+ continue
+ }
+ if !reflect.DeepEqual(test.q, test.exp) {
+ t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
+ }
+ }
+}
+
+func TestStringMeaning(t *testing.T) {
+ var xx [4]interface{}
+ xx[0] = &struct {
+ X string
+ }{"xx0"}
+ xx[1] = &struct {
+ X string `datastore:",noindex"`
+ }{"xx1"}
+ xx[2] = &struct {
+ X []byte
+ }{[]byte("xx2")}
+ xx[3] = &struct {
+ X []byte `datastore:",noindex"`
+ }{[]byte("xx3")}
+
+ indexed := [4]bool{
+ true,
+ false,
+ false, // A []byte is always no-index.
+ false,
+ }
+ want := [4]pb.Property_Meaning{
+ pb.Property_NO_MEANING,
+ pb.Property_TEXT,
+ pb.Property_BLOB,
+ pb.Property_BLOB,
+ }
+
+ for i, x := range xx {
+ props, err := SaveStruct(x)
+ if err != nil {
+ t.Errorf("i=%d: SaveStruct: %v", i, err)
+ continue
+ }
+ e, err := propertiesToProto("appID", testKey0, props)
+ if err != nil {
+ t.Errorf("i=%d: propertiesToProto: %v", i, err)
+ continue
+ }
+ var p *pb.Property
+ switch {
+ case indexed[i] && len(e.Property) == 1:
+ p = e.Property[0]
+ case !indexed[i] && len(e.RawProperty) == 1:
+ p = e.RawProperty[0]
+ default:
+ t.Errorf("i=%d: EntityProto did not have expected property slice", i)
+ continue
+ }
+ if got := p.GetMeaning(); got != want[i] {
+ t.Errorf("i=%d: meaning: got %v, want %v", i, got, want[i])
+ continue
+ }
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ // These environment variables are necessary because *Query.Run will
+ // call internal.FullyQualifiedAppID which checks these variables or falls
+ // back to the Metadata service that is not available in tests.
+ environ := []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_PARTITION", "1"},
+ }
+	for i, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		// Store the old value back into the slice itself so the deferred
+		// cleanup below can restore it; assigning to the loop copy v
+		// would be lost.
+		environ[i].value = old
+	}
+ defer func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }()
+
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(req *pb.Query, res *pb.QueryResult) error {
+ namec <- req.NameSpace
+ return fmt.Errorf("RPC error")
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ q := NewQuery("SomeKind")
+
+ q.Run(c0)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`RunQuery with c0: ns = %q, want nil`, *ns)
+ }
+
+ q.Run(c1)
+ if ns := <-namec; ns == nil {
+ t.Error(`RunQuery with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`RunQuery with c1: ns = %q, want "A"`, *ns)
+ }
+
+ q.Run(c2)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`RunQuery with c2: ns = %q, want nil`, *ns)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 000000000..85616cf27
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,361 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package datastore provides a client for App Engine's datastore service.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key. A key
+consists of an optional parent key, a string application ID, a string kind
+(also known as an entity type), and either a StringID or an IntID. A
+StringID is also known as an entity name or key name.
+
+It is valid to create a key with a zero StringID and a zero IntID; this is
+called an incomplete key, and does not refer to any saved entity. Putting an
+entity into the datastore under an incomplete key will cause a unique key
+to be generated for that entity, with a non-zero IntID.
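+
+A sketch of saving under an incomplete key (error handling elided; the Entity
+type and ctx are as in the example further below):
+
+	k := datastore.NewIncompleteKey(ctx, "Entity", nil)
+	k, _ = datastore.Put(ctx, k, &Entity{Value: "something"})
+	// k is now a complete key with a datastore-assigned, non-zero IntID.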
+
+An entity's contents are a mapping from case-sensitive field names to values.
+Valid value types are:
+ - signed integers (int, int8, int16, int32 and int64),
+ - bool,
+ - string,
+ - float32 and float64,
+ - []byte (up to 1 megabyte in length),
+ - any type whose underlying type is one of the above predeclared types,
+ - ByteString,
+ - *Key,
+ - time.Time (stored with microsecond precision),
+ - appengine.BlobKey,
+ - appengine.GeoPoint,
+ - structs whose fields are all valid value types,
+ - slices of any of the above.
+
+Slices of structs are valid, as are structs that contain slices. However, if
+one struct contains another, then at most one of those can be repeated. This
+disqualifies recursively defined struct types: any struct T that (directly or
+indirectly) contains a []T.
+
+The Get and Put functions load and save an entity's contents. An entity's
+contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Entity struct {
+ Value string
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+
+ k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
+ e := new(Entity)
+ if err := datastore.Get(ctx, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ old := e.Value
+ e.Value = r.URL.Path
+
+ if _, err := datastore.Put(ctx, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
+ }
+
+GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
+Delete functions. They take a []*Key instead of a *Key, and may return an
+appengine.MultiError when encountering partial failure.
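+
+A rough sketch of batch loading (the key slice is illustrative; the handler
+variables ctx and w are as in the example above, and error handling is
+abbreviated):
+
+	keys := []*datastore.Key{k1, k2, k3}
+	dst := make([]Entity, len(keys))
+	if err := datastore.GetMulti(ctx, keys, dst); err != nil {
+		if me, ok := err.(appengine.MultiError); ok {
+			for i, e := range me {
+				if e != nil {
+					// The entity for keys[i] failed to load; e may be
+					// datastore.ErrNoSuchEntity.
+					fmt.Fprintf(w, "key %v: %v\n", keys[i], e)
+				}
+			}
+		}
+	}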
+
+
+Properties
+
+An entity's contents can be represented by a variety of types. These are
+typically struct pointers, but can also be any type that implements the
+PropertyLoadSaver interface. If using a struct pointer, you do not have to
+explicitly implement the PropertyLoadSaver interface; the datastore will
+automatically convert via reflection. If a struct pointer does implement that
+interface then those methods will be used in preference to the default
+behavior for struct pointers. Struct pointers are more strongly typed and are
+easier to use; PropertyLoadSavers are more flexible.
+
+The actual types passed do not have to match between Get and Put calls or even
+across different calls to datastore. It is valid to put a *PropertyList and
+get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (for
+example, because the struct lacks a field for one of the properties) results
+in an ErrFieldMismatch error, but it is up to the caller whether this error
+is fatal, recoverable or ignorable.
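+
+For example, a caller that treats a field mismatch as ignorable might write
+the following (a sketch, reusing the handler variables from the example
+above):
+
+	e := new(Entity)
+	if err := datastore.Get(ctx, k, e); err != nil {
+		if _, ok := err.(*datastore.ErrFieldMismatch); !ok {
+			// Anything other than a field mismatch is treated as fatal.
+			http.Error(w, err.Error(), 500)
+			return
+		}
+		// A field mismatch is ignored: e holds the properties that did load.
+	}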
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter).
+
+Fields may have a `datastore:"name,options"` tag. The tag name is the
+property name, which must be one or more valid Go identifiers joined by ".",
+but may start with a lower case letter. An empty tag name means to just use the
+field name. A "-" tag name means that the datastore will ignore that field.
+
+The only valid options are "omitempty" and "noindex".
+
+If the options include "omitempty" and the value of the field is empty, then
+the field will be omitted on Save. The empty values are false, 0, any nil
+interface value, and any array, slice, map, or string of length zero. Struct
+field values will never be empty.
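+
+For instance, with the sketch below (the type and field names are
+illustrative), a zero Count and an empty Tags slice would not be saved, while
+Name always would be:
+
+	type Profile struct {
+		Name  string
+		Count int      `datastore:",omitempty"`
+		Tags  []string `datastore:",omitempty"`
+	}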
+
+If options include "noindex" then the field will not be indexed. All fields are indexed
+by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
+fields used to store long strings and byte slices must be tagged with "noindex"
+or they will cause Put operations to fail.
+
+To use multiple options together, separate them by a comma.
+The order does not matter.
+
+If the options string is "", the comma may be omitted.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and J are not indexed.
+ // D's tag is equivalent to having no tag at all (E).
+ // I is ignored entirely by the datastore.
+ // J has tag information for both the datastore and json packages.
+ type TaggedStruct struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ }
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+ type Inner1 struct {
+ W int32
+ X string
+ }
+
+ type Inner2 struct {
+ Y float64
+ }
+
+ type Inner3 struct {
+ Z bool
+ }
+
+ type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+ }
+
+then an Outer's properties would be equivalent to those of:
+
+ type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+ }
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+ type CustomPropsExample struct {
+ I, J int
+ // Sum is not stored, but should always be equal to I + J.
+ Sum int `datastore:"-"`
+ }
+
+ func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+ // Load I and J as usual.
+ if err := datastore.LoadStruct(x, ps); err != nil {
+ return err
+ }
+ // Derive the Sum field.
+ x.Sum = x.I + x.J
+ return nil
+ }
+
+ func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+ // Validate the Sum field.
+ if x.Sum != x.I + x.J {
+ return nil, errors.New("CustomPropsExample has inconsistent sum")
+ }
+ // Save I and J as usual. The code below is equivalent to calling
+ // "return datastore.SaveStruct(x)", but is done manually for
+ // demonstration purposes.
+ return []datastore.Property{
+ {
+ Name: "I",
+ Value: int64(x.I),
+ },
+ {
+ Name: "J",
+ Value: int64(x.J),
+ },
+ }, nil
+ }
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+ to return. Start and End take cursors, Offset and Limit take integers. Start
+ and Offset affect the first result, End and Limit affect the last result.
+ If both Start and Offset are set, then the offset is relative to Start.
+ If both End and Limit are set, then the earliest constraint wins. Limit is
+ relative to Start+Offset, not relative to End. As a special case, a
+ negative limit means unlimited.
+
+Example code:
+
+ type Widget struct {
+ Description string
+ Price int
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ q := datastore.NewQuery("Widget").
+ Filter("Price <", 1000).
+ Order("-Price")
+ b := new(bytes.Buffer)
+ for t := q.Run(ctx); ; {
+ var x Widget
+ key, err := t.Next(&x)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+ fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ io.Copy(w, b)
+ }
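+
+Cursors returned by the iterator can be passed to Start on a later query to
+resume where a previous run left off. A rough sketch, continuing from the q,
+r and b of the example above (the "cursor" form value is illustrative):
+
+	if cursor, err := datastore.DecodeCursor(r.FormValue("cursor")); err == nil {
+		q = q.Start(cursor)
+	}
+	t := q.Run(ctx)
+	// ... iterate with t.Next as above ...
+	if next, err := t.Cursor(); err == nil {
+		// next.String() can be handed back to the client to resume later.
+		fmt.Fprintf(b, "next=%s\n", next.String())
+	}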
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+ type Counter struct {
+ Count int
+ }
+
+ func inc(ctx context.Context, key *datastore.Key) (int, error) {
+ var x Counter
+ if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+ return 0, err
+ }
+ x.Count++
+ if _, err := datastore.Put(ctx, key, &x); err != nil {
+ return 0, err
+ }
+ return x.Count, nil
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ var count int
+ err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+ var err1 error
+ count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
+ return err1
+ }, nil)
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "Count=%d", count)
+ }
+
+
+Metadata
+
+The datastore package provides access to some of App Engine's datastore
+metadata. This metadata includes information about the entity groups,
+namespaces, entity kinds, and properties in the datastore, as well as the
+property representations for each property.
+
+Example code:
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ // Print all the kinds in the datastore, with all the indexed
+ // properties (and their representations) for each.
+ ctx := appengine.NewContext(r)
+
+ kinds, err := datastore.Kinds(ctx)
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ for _, kind := range kinds {
+ fmt.Fprintf(w, "%s:\n", kind)
+ props, err := datastore.KindProperties(ctx, kind)
+ if err != nil {
+ fmt.Fprintln(w, "\t(unable to retrieve properties)")
+ continue
+ }
+ for p, rep := range props {
+ fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
+ }
+ }
+ }
+*/
+package datastore // import "google.golang.org/appengine/datastore"
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 000000000..ac1f00250
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+ kind string
+ stringID string
+ intID int64
+ parent *Key
+ appID string
+ namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+ return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+ return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+ return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+ return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+ return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+ return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+ return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+ if k == nil {
+ return false
+ }
+ for ; k != nil; k = k.parent {
+ if k.kind == "" || k.appID == "" {
+ return false
+ }
+ if k.stringID != "" && k.intID != 0 {
+ return false
+ }
+ if k.parent != nil {
+ if k.parent.Incomplete() {
+ return false
+ }
+ if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+ for k != nil && o != nil {
+ if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+ return false
+ }
+ k, o = k.parent, o.parent
+ }
+ return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key {
+ for k.parent != nil {
+ k = k.parent
+ }
+ return k
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+ if k.parent != nil {
+ k.parent.marshal(b)
+ }
+ b.WriteByte('/')
+ b.WriteString(k.kind)
+ b.WriteByte(',')
+ if k.stringID != "" {
+ b.WriteString(k.stringID)
+ } else {
+ b.WriteString(strconv.FormatInt(k.intID, 10))
+ }
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+ if k == nil {
+ return ""
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 512))
+ k.marshal(b)
+ return b.String()
+}
+
+type gobKey struct {
+ Kind string
+ StringID string
+ IntID int64
+ Parent *gobKey
+ AppID string
+ Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+ if k == nil {
+ return nil
+ }
+ return &gobKey{
+ Kind: k.kind,
+ StringID: k.stringID,
+ IntID: k.intID,
+ Parent: keyToGobKey(k.parent),
+ AppID: k.appID,
+ Namespace: k.namespace,
+ }
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+ if gk == nil {
+ return nil
+ }
+ return &Key{
+ kind: gk.Kind,
+ stringID: gk.StringID,
+ intID: gk.IntID,
+ parent: gobKeyToKey(gk.Parent),
+ appID: gk.AppID,
+ namespace: gk.Namespace,
+ }
+}
+
+func (k *Key) GobEncode() ([]byte, error) {
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (k *Key) GobDecode(buf []byte) error {
+ gk := new(gobKey)
+ if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+ return err
+ }
+ *k = *gobKeyToKey(gk)
+ return nil
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + k.Encode() + `"`), nil
+}
+
+func (k *Key) UnmarshalJSON(buf []byte) error {
+ if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+ return errors.New("datastore: bad JSON key")
+ }
+ k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+ if err != nil {
+ return err
+ }
+ *k = *k2
+ return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+ ref := keyToProto("", k)
+
+ b, err := proto.Marshal(ref)
+ if err != nil {
+ panic(err)
+ }
+
+ // Trailing padding is stripped.
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+ // Re-add padding.
+ if m := len(encoded) % 4; m != 0 {
+ encoded += strings.Repeat("=", 4-m)
+ }
+
+ b, err := base64.URLEncoding.DecodeString(encoded)
+ if err != nil {
+ return nil, err
+ }
+
+ ref := new(pb.Reference)
+ if err := proto.Unmarshal(b, ref); err != nil {
+ return nil, err
+ }
+
+ return protoToKey(ref)
+}
+
+// NewIncompleteKey creates a new incomplete key.
+// kind cannot be empty.
+func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
+ return NewKey(c, kind, "", 0, parent)
+}
+
+// NewKey creates a new key.
+// kind cannot be empty.
+// Either one or both of stringID and intID must be zero. If both are zero,
+// the key returned is incomplete.
+// parent must either be a complete key or nil.
+func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
+ // If there's a parent key, use its namespace.
+ // Otherwise, use any namespace attached to the context.
+ var namespace string
+ if parent != nil {
+ namespace = parent.namespace
+ } else {
+ namespace = internal.NamespaceFromContext(c)
+ }
+
+ return &Key{
+ kind: kind,
+ stringID: stringID,
+ intID: intID,
+ parent: parent,
+ appID: internal.FullyQualifiedAppID(c),
+ namespace: namespace,
+ }
+}
+
+// AllocateIDs returns a range of n integer IDs with the given kind and parent
+// combination. kind cannot be empty; parent may be nil. The IDs in the range
+// returned will not be used by the datastore's automatic ID sequence generator
+// and may be used with NewKey without conflict.
+//
+// The range is inclusive at the low end and exclusive at the high end. In
+// other words, valid intIDs x satisfy low <= x && x < high.
+//
+// If no error is returned, low + n == high.
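+//
+// A rough usage sketch (the kind name is illustrative):
+//
+//	low, _, err := AllocateIDs(c, "Gopher", nil, 10)
+//	if err == nil {
+//		// IDs low, low+1, ..., low+9 are reserved for this kind/parent.
+//		first := NewKey(c, "Gopher", "", low, nil)
+//		_ = first
+//	}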
+func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
+ if kind == "" {
+ return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
+ }
+ if n < 0 {
+ return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
+ }
+ if n == 0 {
+ return 0, 0, nil
+ }
+ req := &pb.AllocateIdsRequest{
+ ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
+ Size: proto.Int64(int64(n)),
+ }
+ res := &pb.AllocateIdsResponse{}
+ if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
+ return 0, 0, err
+ }
+ // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
+ // is inclusive at the low end and exclusive at the high end, so we add 1.
+ low = res.GetStart()
+ high = res.GetEnd() + 1
+ if low+int64(n) != high {
+ return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
+ }
+ return low, high, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/key_test.go b/vendor/google.golang.org/appengine/datastore/key_test.go
new file mode 100644
index 000000000..1fb3e9752
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key_test.go
@@ -0,0 +1,204 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "testing"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestKeyEncoding(t *testing.T) {
+ testCases := []struct {
+ desc string
+ key *Key
+ exp string
+ }{
+ {
+ desc: "A simple key with an int ID",
+ key: &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM",
+ },
+ {
+ desc: "A simple key with a string ID",
+ key: &Key{
+ kind: "Graph",
+ stringID: "graph:7-day-active",
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw",
+ },
+ {
+ desc: "A key with a parent",
+ key: &Key{
+ kind: "WordIndex",
+ intID: 1033,
+ parent: &Key{
+ kind: "WordIndex",
+ intID: 1020032,
+ appID: "glibrary",
+ },
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM",
+ },
+ }
+ for _, tc := range testCases {
+ enc := tc.key.Encode()
+ if enc != tc.exp {
+ t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp)
+ }
+
+ key, err := DecodeKey(tc.exp)
+ if err != nil {
+ t.Errorf("%s: failed decoding key: %v", tc.desc, err)
+ continue
+ }
+ if !key.Equal(tc.key) {
+ t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key)
+ }
+ }
+}
+
+func TestKeyGob(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 3,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(k); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ k2 := new(Key)
+ if err := gob.NewDecoder(buf).Decode(k2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("gob round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyGob(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(s1); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := gob.NewDecoder(buf).Decode(s2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("gob round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestKeyJSON(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 2,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+ exp := `"` + k.Encode() + `"`
+
+ buf, err := json.Marshal(k)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ if s := string(buf); s != exp {
+ t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp)
+ }
+
+ k2 := new(Key)
+ if err := json.Unmarshal(buf, k2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("JSON round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyJSON(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf, err := json.Marshal(s1)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := json.Unmarshal(buf, s2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("JSON round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestIncompleteKeyWithParent(t *testing.T) {
+ c := internal.WithAppIDOverride(context.Background(), "s~some-app")
+
+ // fadduh is a complete key.
+ fadduh := NewKey(c, "Person", "", 1, nil)
+ if fadduh.Incomplete() {
+ t.Fatalf("fadduh is incomplete")
+ }
+
+ // robert is an incomplete key with fadduh as a parent.
+ robert := NewIncompleteKey(c, "Person", fadduh)
+ if !robert.Incomplete() {
+ t.Fatalf("robert is complete")
+ }
+
+ // Both should be valid keys.
+ if !fadduh.valid() {
+ t.Errorf("fadduh is invalid: %v", fadduh)
+ }
+ if !robert.valid() {
+ t.Errorf("robert is invalid: %v", robert)
+ }
+}
+
+func TestNamespace(t *testing.T) {
+ key := &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "s~some-app",
+ namespace: "mynamespace",
+ }
+ if g, w := key.Namespace(), "mynamespace"; g != w {
+ t.Errorf("key.Namespace() = %q, want %q", g, w)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 000000000..38a636539
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,429 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
+ typeOfByteSlice = reflect.TypeOf([]byte(nil))
+ typeOfByteString = reflect.TypeOf(ByteString(nil))
+ typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
+ typeOfTime = reflect.TypeOf(time.Time{})
+ typeOfKeyPtr = reflect.TypeOf(&Key{})
+ typeOfEntityPtr = reflect.TypeOf(&Entity{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
+func typeMismatchReason(pValue interface{}, v reflect.Value) string {
+ entityType := "empty"
+ switch pValue.(type) {
+ case int64:
+ entityType = "int"
+ case bool:
+ entityType = "bool"
+ case string:
+ entityType = "string"
+ case float64:
+ entityType = "float"
+ case *Key:
+ entityType = "*datastore.Key"
+ case time.Time:
+ entityType = "time.Time"
+ case appengine.BlobKey:
+ entityType = "appengine.BlobKey"
+ case appengine.GeoPoint:
+ entityType = "appengine.GeoPoint"
+ case ByteString:
+ entityType = "datastore.ByteString"
+ case []byte:
+ entityType = "[]byte"
+ }
+ return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
+}
+
+type propertyLoader struct {
+ // m holds the number of times a substruct field like "Foo.Bar.Baz" has
+ // been seen so far. The map is constructed lazily.
+ m map[string]int
+}
+
+func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
+ var v reflect.Value
+ var sliceIndex int
+
+ name := p.Name
+
+ // If name ends with a '.', the last field is anonymous.
+ // In this case, strings.Split will give us "" as the
+ // last element of our fields slice, which will match the ""
+ // field name in the substruct codec.
+ fields := strings.Split(name, ".")
+
+ for len(fields) > 0 {
+ var decoder fieldCodec
+ var ok bool
+
+ // Cut off the last field (delimited by ".") and find its parent
+ // in the codec.
+ // eg. for name "A.B.C.D", split off "A.B.C" and try to
+ // find a field in the codec with this name.
+ // Loop again with "A.B", etc.
+ for i := len(fields); i > 0; i-- {
+ parent := strings.Join(fields[:i], ".")
+ decoder, ok = codec.fields[parent]
+ if ok {
+ fields = fields[i:]
+ break
+ }
+ }
+
+ // If we never found a matching field in the codec, return
+ // error message.
+ if !ok {
+ return "no such struct field"
+ }
+
+ v = initField(structValue, decoder.path)
+ if !v.IsValid() {
+ return "no such struct field"
+ }
+ if !v.CanSet() {
+ return "cannot set struct field"
+ }
+
+ if decoder.structCodec != nil {
+ codec = decoder.structCodec
+ structValue = v
+ }
+
+ if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
+ if l.m == nil {
+ l.m = make(map[string]int)
+ }
+ sliceIndex = l.m[p.Name]
+ l.m[p.Name] = sliceIndex + 1
+ for v.Len() <= sliceIndex {
+ v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
+ }
+ structValue = v.Index(sliceIndex)
+ requireSlice = false
+ }
+ }
+
+ var slice reflect.Value
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ slice = v
+ v = reflect.New(v.Type().Elem()).Elem()
+ } else if requireSlice {
+ return "multiple-valued property requires a slice field type"
+ }
+
+ // Convert indexValues to a Go value with a meaning derived from the
+ // destination type.
+ pValue := p.Value
+ if iv, ok := pValue.(indexValue); ok {
+ meaning := pb.Property_NO_MEANING
+ switch v.Type() {
+ case typeOfBlobKey:
+ meaning = pb.Property_BLOBKEY
+ case typeOfByteSlice:
+ meaning = pb.Property_BLOB
+ case typeOfByteString:
+ meaning = pb.Property_BYTESTRING
+ case typeOfGeoPoint:
+ meaning = pb.Property_GEORSS_POINT
+ case typeOfTime:
+ meaning = pb.Property_GD_WHEN
+ case typeOfEntityPtr:
+ meaning = pb.Property_ENTITY_PROTO
+ }
+ var err error
+ pValue, err = propValue(iv.value, meaning)
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ if errReason := setVal(v, pValue); errReason != "" {
+ // Set the slice back to its zero value.
+ if slice.IsValid() {
+ slice.Set(reflect.Zero(slice.Type()))
+ }
+ return errReason
+ }
+
+ if slice.IsValid() {
+ slice.Index(sliceIndex).Set(v)
+ }
+
+ return ""
+}
+
+// setVal sets v to the value pValue.
+func setVal(v reflect.Value, pValue interface{}) string {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x, ok := pValue.(int64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if v.OverflowInt(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetInt(x)
+ case reflect.Bool:
+ x, ok := pValue.(bool)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ v.SetBool(x)
+ case reflect.String:
+ switch x := pValue.(type) {
+ case appengine.BlobKey:
+ v.SetString(string(x))
+ case ByteString:
+ v.SetString(string(x))
+ case string:
+ v.SetString(x)
+ default:
+ if pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ x, ok := pValue.(float64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if v.OverflowFloat(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetFloat(x)
+ case reflect.Ptr:
+ x, ok := pValue.(*Key)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if _, ok := v.Interface().(*Key); !ok {
+ return typeMismatchReason(pValue, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case reflect.Struct:
+ switch v.Type() {
+ case typeOfTime:
+ x, ok := pValue.(time.Time)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case typeOfGeoPoint:
+ x, ok := pValue.(appengine.GeoPoint)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ default:
+ ent, ok := pValue.(*Entity)
+ if !ok {
+ return typeMismatchReason(pValue, v)
+ }
+
+ // Recursively load nested struct
+ pls, err := newStructPLS(v.Addr().Interface())
+ if err != nil {
+ return err.Error()
+ }
+
+ // if ent has a Key value and our struct has a Key field,
+ // load the Entity's Key value into the Key field on the struct.
+ if ent.Key != nil && pls.codec.keyField != -1 {
+
+ pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
+ }
+
+ err = pls.Load(ent.Properties)
+ if err != nil {
+ return err.Error()
+ }
+ }
+ case reflect.Slice:
+ x, ok := pValue.([]byte)
+ if !ok {
+ if y, yok := pValue.(ByteString); yok {
+ x, ok = []byte(y), true
+ }
+ }
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ return typeMismatchReason(pValue, v)
+ }
+ v.SetBytes(x)
+ default:
+ return typeMismatchReason(pValue, v)
+ }
+ return ""
+}
+
+// initField is similar to reflect's Value.FieldByIndex, in that it
+// returns the nested struct field corresponding to index, but it
+// initialises any nil pointers encountered when traversing the structure.
+func initField(val reflect.Value, index []int) reflect.Value {
+ for _, i := range index[:len(index)-1] {
+ val = val.Field(i)
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+ }
+ return val.Field(index[len(index)-1])
+}
+
+// loadEntity loads an EntityProto into a PropertyLoadSaver or struct pointer.
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
+ ent, err := protoToEntity(src)
+ if err != nil {
+ return err
+ }
+ if e, ok := dst.(PropertyLoadSaver); ok {
+ return e.Load(ent.Properties)
+ }
+ return LoadStruct(dst, ent.Properties)
+}
+
+func (s structPLS) Load(props []Property) error {
+ var fieldName, reason string
+ var l propertyLoader
+ for _, p := range props {
+ if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
+ // We don't return early, as we try to load as many properties as possible.
+ // It is valid to load an entity into a struct that cannot fully represent it.
+ // That case returns an error, but the caller is free to ignore it.
+ fieldName, reason = p.Name, errStr
+ }
+ }
+ if reason != "" {
+ return &ErrFieldMismatch{
+ StructType: s.v.Type(),
+ FieldName: fieldName,
+ Reason: reason,
+ }
+ }
+ return nil
+}
+
+func protoToEntity(src *pb.EntityProto) (*Entity, error) {
+ props, rawProps := src.Property, src.RawProperty
+ outProps := make([]Property, 0, len(props)+len(rawProps))
+ for {
+ var (
+ x *pb.Property
+ noIndex bool
+ )
+ if len(props) > 0 {
+ x, props = props[0], props[1:]
+ } else if len(rawProps) > 0 {
+ x, rawProps = rawProps[0], rawProps[1:]
+ noIndex = true
+ } else {
+ break
+ }
+
+ var value interface{}
+ if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
+ value = indexValue{x.Value}
+ } else {
+ var err error
+ value, err = propValue(x.Value, x.GetMeaning())
+ if err != nil {
+ return nil, err
+ }
+ }
+ outProps = append(outProps, Property{
+ Name: x.GetName(),
+ Value: value,
+ NoIndex: noIndex,
+ Multiple: x.GetMultiple(),
+ })
+ }
+
+ var key *Key
+ if src.Key != nil {
+ // Ignore any error, since nested entity values
+ // are allowed to have an invalid key.
+ key, _ = protoToKey(src.Key)
+ }
+ return &Entity{key, outProps}, nil
+}
+
+// propValue returns a Go value that combines the raw PropertyValue with a
+// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
+func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
+ switch {
+ case v.Int64Value != nil:
+ if m == pb.Property_GD_WHEN {
+ return fromUnixMicro(*v.Int64Value), nil
+ } else {
+ return *v.Int64Value, nil
+ }
+ case v.BooleanValue != nil:
+ return *v.BooleanValue, nil
+ case v.StringValue != nil:
+ if m == pb.Property_BLOB {
+ return []byte(*v.StringValue), nil
+ } else if m == pb.Property_BLOBKEY {
+ return appengine.BlobKey(*v.StringValue), nil
+ } else if m == pb.Property_BYTESTRING {
+ return ByteString(*v.StringValue), nil
+ } else if m == pb.Property_ENTITY_PROTO {
+ var ent pb.EntityProto
+ err := proto.Unmarshal([]byte(*v.StringValue), &ent)
+ if err != nil {
+ return nil, err
+ }
+ return protoToEntity(&ent)
+ } else {
+ return *v.StringValue, nil
+ }
+ case v.DoubleValue != nil:
+ return *v.DoubleValue, nil
+ case v.Referencevalue != nil:
+ key, err := referenceValueToKey(v.Referencevalue)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+ case v.Pointvalue != nil:
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
+ }
+ return nil, nil
+}
+
+// indexValue is a Property value that is created when entities are loaded from
+// an index, such as from a projection query.
+//
+// Such Property values do not contain all of the metadata required to be
+// faithfully represented as a Go value, and are instead represented as an
+// opaque indexValue. Load the properties into a concrete struct type (e.g. by
+// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
+// of type int, string, time.Time, etc.
+type indexValue struct {
+ value *pb.PropertyValue
+}
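+
+// Editor's note: the following is an illustrative sketch, not part of the
+// upstream source. It shows the pattern the comment above describes: loading
+// projection-query results into a concrete struct so the opaque indexValues
+// become real Go values. The Gopher type and ctx are assumed for the example.
+//
+//	type Gopher struct {
+//		Name   string
+//		Height int64
+//	}
+//
+//	for t := NewQuery("Gopher").Project("Name", "Height").Run(ctx); ; {
+//		var g Gopher
+//		_, err := t.Next(&g) // g.Name and g.Height are concrete values here
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			break // handle error
+//		}
+//	}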
diff --git a/vendor/google.golang.org/appengine/datastore/load_test.go b/vendor/google.golang.org/appengine/datastore/load_test.go
new file mode 100644
index 000000000..46029bba5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load_test.go
@@ -0,0 +1,656 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "reflect"
+ "testing"
+
+ proto "github.com/golang/protobuf/proto"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+type Simple struct {
+ I int64
+}
+
+type SimpleWithTag struct {
+ I int64 `datastore:"II"`
+}
+
+type NestedSimpleWithTag struct {
+ A SimpleWithTag `datastore:"AA"`
+}
+
+type NestedSliceOfSimple struct {
+ A []Simple
+}
+
+type SimpleTwoFields struct {
+ S string
+ SS string
+}
+
+type NestedSimpleAnonymous struct {
+ Simple
+ X string
+}
+
+type NestedSimple struct {
+ A Simple
+ I int64
+}
+
+type NestedSimple1 struct {
+ A Simple
+ X string
+}
+
+type NestedSimple2X struct {
+ AA NestedSimple
+ A SimpleTwoFields
+ S string
+}
+
+type BDotB struct {
+ B string `datastore:"B.B"`
+}
+
+type ABDotB struct {
+ A BDotB
+}
+
+type MultiAnonymous struct {
+ Simple
+ SimpleTwoFields
+ X string
+}
+
+var (
+ // these values need to be addressable
+ testString2 = "two"
+ testString3 = "three"
+ testInt64 = int64(2)
+
+ fieldNameI = "I"
+ fieldNameX = "X"
+ fieldNameS = "S"
+ fieldNameSS = "SS"
+ fieldNameADotI = "A.I"
+ fieldNameAADotII = "AA.II"
+ fieldNameADotBDotB = "A.B.B"
+)
+
+func TestLoadEntityNestedLegacy(t *testing.T) {
+ testCases := []struct {
+ desc string
+ src *pb.EntityProto
+ want interface{}
+ }{
+ {
+ "nested",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameADotI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimple1{
+ A: Simple{I: testInt64},
+ X: testString2,
+ },
+ },
+ {
+ "nested with tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameAADotII,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimpleWithTag{
+ A: SimpleWithTag{I: testInt64},
+ },
+ },
+ {
+ "nested with anonymous struct field",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimpleAnonymous{
+ Simple: Simple{I: testInt64},
+ X: testString2,
+ },
+ },
+ {
+ "nested with dotted field tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameADotBDotB,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ },
+ },
+ &ABDotB{
+ A: BDotB{
+ B: testString2,
+ },
+ },
+ },
+ {
+ "nested with dotted field tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameSS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ },
+ },
+ &MultiAnonymous{
+ Simple: Simple{I: testInt64},
+ SimpleTwoFields: SimpleTwoFields{S: "two", SS: "three"},
+ X: "three",
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ err := loadEntity(dst, tc.src)
+ if err != nil {
+ t.Errorf("loadEntity: %s: %v", tc.desc, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(tc.want, dst) {
+ t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
+ }
+ }
+}
+
+type WithKey struct {
+ X string
+ I int64
+ K *Key `datastore:"__key__"`
+}
+
+type NestedWithKey struct {
+ N WithKey
+ Y string
+}
+
+var (
+ incompleteKey = newKey("", nil)
+ invalidKey = newKey("s", incompleteKey)
+
+ // these values need to be addressable
+ fieldNameA = "A"
+ fieldNameK = "K"
+ fieldNameN = "N"
+ fieldNameY = "Y"
+ fieldNameAA = "AA"
+ fieldNameII = "II"
+ fieldNameBDotB = "B.B"
+
+ entityProtoMeaning = pb.Property_ENTITY_PROTO
+
+ TRUE = true
+ FALSE = false
+)
+
+var (
+ simpleEntityProto, nestedSimpleEntityProto,
+ simpleTwoFieldsEntityProto, simpleWithTagEntityProto,
+ bDotBEntityProto, withKeyEntityProto string
+)
+
+func init() {
+ // simpleEntityProto corresponds to:
+ // Simple{I: testInt64}
+ simpleEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ simpleEntityProto = string(simpleEntityProtob)
+
+ // nestedSimpleEntityProto corresponds to:
+ // NestedSimple{
+ // A: Simple{I: testInt64},
+ // I: testInt64,
+ // }
+ nestedSimpleEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ Multiple: &FALSE,
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ nestedSimpleEntityProto = string(nestedSimpleEntityProtob)
+
+ // simpleTwoFieldsEntityProto corresponds to:
+ // SimpleTwoFields{S: testString2, SS: testString3}
+ simpleTwoFieldsEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ Multiple: &FALSE,
+ },
+ &pb.Property{
+ Name: &fieldNameSS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ simpleTwoFieldsEntityProto = string(simpleTwoFieldsEntityProtob)
+
+ // simpleWithTagEntityProto corresponds to:
+ // SimpleWithTag{I: testInt64}
+ simpleWithTagEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameII,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ simpleWithTagEntityProto = string(simpleWithTagEntityProtob)
+
+ // bDotBEntityProto corresponds to:
+ // BDotB{
+ // B: testString2,
+ // }
+ bDotBEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameBDotB,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ bDotBEntityProto = string(bDotBEntityProtob)
+
+ // withKeyEntityProto corresponds to:
+ // WithKey{
+ // X: testString3,
+ // I: testInt64,
+ // K: testKey1a,
+ // }
+ withKeyEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", testKey1a),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ Multiple: &FALSE,
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ withKeyEntityProto = string(withKeyEntityProtob)
+}
+
+func TestLoadEntityNested(t *testing.T) {
+ testCases := []struct {
+ desc string
+ src *pb.EntityProto
+ want interface{}
+ }{
+ {
+ "nested basic",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimple{
+ A: Simple{I: 2},
+ I: 2,
+ },
+ },
+ {
+ "nested with struct tags",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameAA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleWithTagEntityProto,
+ },
+ },
+ },
+ },
+ &NestedSimpleWithTag{
+ A: SimpleWithTag{I: testInt64},
+ },
+ },
+ {
+ "nested 2x",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameAA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &nestedSimpleEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleTwoFieldsEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ },
+ },
+ &NestedSimple2X{
+ AA: NestedSimple{
+ A: Simple{I: testInt64},
+ I: testInt64,
+ },
+ A: SimpleTwoFields{S: testString2, SS: testString3},
+ S: testString3,
+ },
+ },
+ {
+ "nested anonymous",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ },
+ },
+ &NestedSimpleAnonymous{
+ Simple: Simple{I: testInt64},
+ X: testString2,
+ },
+ },
+ {
+ "nested simple with slice",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Multiple: &TRUE,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Multiple: &TRUE,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ },
+ },
+ },
+ &NestedSliceOfSimple{
+ A: []Simple{Simple{I: testInt64}, Simple{I: testInt64}},
+ },
+ },
+ {
+ "nested with multiple anonymous fields",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameSS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ },
+ },
+ &MultiAnonymous{
+ Simple: Simple{I: testInt64},
+ SimpleTwoFields: SimpleTwoFields{S: testString2, SS: testString3},
+ X: testString2,
+ },
+ },
+ {
+ "nested with dotted field tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &bDotBEntityProto,
+ },
+ },
+ },
+ },
+ &ABDotB{
+ A: BDotB{
+ B: testString2,
+ },
+ },
+ },
+ {
+ "nested entity with key",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameY,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameN,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &withKeyEntityProto,
+ },
+ },
+ },
+ },
+ &NestedWithKey{
+ Y: testString2,
+ N: WithKey{
+ X: testString3,
+ I: testInt64,
+ K: testKey1a,
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ err := loadEntity(dst, tc.src)
+ if err != nil {
+ t.Errorf("loadEntity: %s: %v", tc.desc, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(tc.want, dst) {
+ t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 000000000..6acacc3db
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import "golang.org/x/net/context"
+
+// Datastore kinds for the metadata entities.
+const (
+ namespaceKind = "__namespace__"
+ kindKind = "__kind__"
+ propertyKind = "__property__"
+)
+
+// Namespaces returns all the datastore namespaces.
+func Namespaces(ctx context.Context) ([]string, error) {
+ // TODO(djd): Support range queries.
+ q := NewQuery(namespaceKind).KeysOnly()
+ keys, err := q.GetAll(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ // The empty namespace key uses a numeric ID (==1), but luckily
+ // the string ID defaults to "" for numeric IDs anyway.
+ return keyNames(keys), nil
+}
+
+// Kinds returns the names of all the kinds in the current namespace.
+func Kinds(ctx context.Context) ([]string, error) {
+ // TODO(djd): Support range queries.
+ q := NewQuery(kindKind).KeysOnly()
+ keys, err := q.GetAll(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return keyNames(keys), nil
+}
+
+// keyNames returns a slice of the provided keys' names (string IDs).
+func keyNames(keys []*Key) []string {
+ n := make([]string, 0, len(keys))
+ for _, k := range keys {
+ n = append(n, k.StringID())
+ }
+ return n
+}
+
+// KindProperties returns all the indexed properties for the given kind.
+// The properties are returned as a map of property names to a slice of the
+// representation types. The representation types for the supported Go property
+// types are:
+// "INT64": signed integers and time.Time
+// "DOUBLE": float32 and float64
+// "BOOLEAN": bool
+// "STRING": string, []byte and ByteString
+// "POINT": appengine.GeoPoint
+// "REFERENCE": *Key
+// "USER": (not used in the Go runtime)
+func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
+ // TODO(djd): Support range queries.
+ kindKey := NewKey(ctx, kindKind, kind, 0, nil)
+ q := NewQuery(propertyKind).Ancestor(kindKey)
+
+ propMap := map[string][]string{}
+ props := []struct {
+ Repr []string `datastore:"property_representation"`
+ }{}
+
+ keys, err := q.GetAll(ctx, &props)
+ if err != nil {
+ return nil, err
+ }
+ for i, p := range props {
+ propMap[keys[i].StringID()] = p.Repr
+ }
+ return propMap, nil
+}
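+
+// Editor's note: a hedged usage sketch, not part of the upstream source. It
+// shows the metadata helpers above used together; ctx is assumed to come from
+// appengine.NewContext.
+//
+//	kinds, err := Kinds(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, kind := range kinds {
+//		props, err := KindProperties(ctx, kind)
+//		if err != nil {
+//			continue
+//		}
+//		// props maps each indexed property name to its representation
+//		// types, e.g. "INT64" or "STRING".
+//		_ = props
+//	}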
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 000000000..5cb2079d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,330 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties. An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+ // Name is the property name.
+ Name string
+ // Value is the property value. The valid types are:
+ // - int64
+ // - bool
+ // - string
+ // - float64
+ // - ByteString
+ // - *Key
+ // - time.Time
+ // - appengine.BlobKey
+ // - appengine.GeoPoint
+ // - []byte (up to 1 megabyte in length)
+ // - *Entity (representing a nested struct)
+ // This set is smaller than the set of valid struct field types that the
+ // datastore can load and save. A Property Value cannot be a slice (apart
+ // from []byte); use multiple Properties instead. Also, a Value's type
+ // must be explicitly on the list above; it is not sufficient for the
+ // underlying type to be on that list. For example, a Value of "type
+ // myInt64 int64" is invalid. Smaller-width integers and floats are also
+ // invalid. Again, this is more restrictive than the set of valid struct
+ // field types.
+ //
+ // A Value will have an opaque type when loading entities from an index,
+ // such as via a projection query. Load entities into a struct instead
+ // of a PropertyLoadSaver when using a projection query.
+ //
+ // A Value may also be the nil interface value; this is equivalent to
+ // Python's None but not directly representable by a Go struct. Loading
+ // a nil-valued property into a struct will set that field to the zero
+ // value.
+ Value interface{}
+ // NoIndex is whether the datastore cannot index this property.
+ NoIndex bool
+ // Multiple is whether the entity can have multiple properties with
+ // the same name. Even if a particular instance only has one property with
+ // a certain name, Multiple should be true if a struct would best represent
+ // it as a field of type []T instead of type T.
+ Multiple bool
+}
+
+// An Entity is the value type for a nested struct.
+// This type is only used for a Property's Value.
+type Entity struct {
+ Key *Key
+ Properties []Property
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+ Load([]Property) error
+ Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+ typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+ typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+ *l = append(*l, p...)
+ return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+ return *l, nil
+}
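+
+// Editor's note: an illustrative sketch, not part of the upstream source. A
+// PropertyList is handy as a schema-less PropertyLoadSaver; someKey and ctx
+// are assumptions, and Get is the package's entity getter from datastore.go.
+//
+//	var props PropertyList
+//	if err := Get(ctx, someKey, &props); err == nil {
+//		for _, p := range props {
+//			// p.Name, p.Value, p.NoIndex and p.Multiple describe each
+//			// stored property.
+//			_ = p
+//		}
+//	}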
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+ if name == "" {
+ return false
+ }
+ for _, s := range strings.Split(name, ".") {
+ if s == "" {
+ return false
+ }
+ first := true
+ for _, c := range s {
+ if first {
+ first = false
+ if c != '_' && !unicode.IsLetter(c) {
+ return false
+ }
+ } else {
+ if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// structCodec describes how to convert a struct to and from a sequence of
+// properties.
+type structCodec struct {
+ // fields gives the field codec for the structTag with the given name.
+ fields map[string]fieldCodec
+ // hasSlice is whether a struct or any of its nested or embedded structs
+ // has a slice-typed field (other than []byte).
+ hasSlice bool
+ // keyField is the index of a *Key field with structTag __key__.
+ // This field is not relevant for the top level struct, only for
+ // nested structs.
+ keyField int
+ // complete is whether the structCodec is complete. An incomplete
+ // structCodec may be encountered when walking a recursive struct.
+ complete bool
+}
+
+// fieldCodec is a struct field's index and, if that struct field's type is
+// itself a struct, that substruct's structCodec.
+type fieldCodec struct {
+ // path is the index path to the field
+ path []int
+ noIndex bool
+ // omitEmpty indicates that the field should be omitted on save
+ // if empty.
+ omitEmpty bool
+ // structCodec is the codec for the struct field at index 'path',
+ // or nil if the field is not a struct.
+ structCodec *structCodec
+}
+
+// structCodecs collects the structCodecs that have already been calculated.
+var (
+ structCodecsMutex sync.Mutex
+ structCodecs = make(map[reflect.Type]*structCodec)
+)
+
+// getStructCodec returns the structCodec for the given struct type.
+func getStructCodec(t reflect.Type) (*structCodec, error) {
+ structCodecsMutex.Lock()
+ defer structCodecsMutex.Unlock()
+ return getStructCodecLocked(t)
+}
+
+// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
+// be held when calling this function.
+func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
+ c, ok := structCodecs[t]
+ if ok {
+ return c, nil
+ }
+ c = &structCodec{
+ fields: make(map[string]fieldCodec),
+ // We initialize keyField to -1 so that the zero-value is not
+ // misinterpreted as index 0.
+ keyField: -1,
+ }
+
+ // Add c to the structCodecs map before we are sure it is good. If t is
+ // a recursive type, it needs to find the incomplete entry for itself in
+ // the map.
+ structCodecs[t] = c
+ defer func() {
+ if retErr != nil {
+ delete(structCodecs, t)
+ }
+ }()
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ // Skip unexported fields.
+ // Note that if f is an anonymous, unexported struct field,
+ // we will promote its fields.
+ if f.PkgPath != "" && !f.Anonymous {
+ continue
+ }
+
+ tags := strings.Split(f.Tag.Get("datastore"), ",")
+ name := tags[0]
+ opts := make(map[string]bool)
+ for _, t := range tags[1:] {
+ opts[t] = true
+ }
+ switch {
+ case name == "":
+ if !f.Anonymous {
+ name = f.Name
+ }
+ case name == "-":
+ continue
+ case name == "__key__":
+ if f.Type != typeOfKeyPtr {
+ return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
+ }
+ c.keyField = i
+ case !validPropertyName(name):
+ return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
+ }
+
+ substructType, fIsSlice := reflect.Type(nil), false
+ switch f.Type.Kind() {
+ case reflect.Struct:
+ substructType = f.Type
+ case reflect.Slice:
+ if f.Type.Elem().Kind() == reflect.Struct {
+ substructType = f.Type.Elem()
+ }
+ fIsSlice = f.Type != typeOfByteSlice
+ c.hasSlice = c.hasSlice || fIsSlice
+ }
+
+ var sub *structCodec
+ if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
+ var err error
+ sub, err = getStructCodecLocked(substructType)
+ if err != nil {
+ return nil, err
+ }
+ if !sub.complete {
+ return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
+ }
+ if fIsSlice && sub.hasSlice {
+ return nil, fmt.Errorf(
+ "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
+ }
+ c.hasSlice = c.hasSlice || sub.hasSlice
+ // If f is an anonymous struct field, we promote the substruct's fields up to this level
+ // in the linked list of struct codecs.
+ if f.Anonymous {
+ for subname, subfield := range sub.fields {
+ if name != "" {
+ subname = name + "." + subname
+ }
+ if _, ok := c.fields[subname]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
+ }
+ c.fields[subname] = fieldCodec{
+ path: append([]int{i}, subfield.path...),
+ noIndex: subfield.noIndex || opts["noindex"],
+ omitEmpty: subfield.omitEmpty,
+ structCodec: subfield.structCodec,
+ }
+ }
+ continue
+ }
+ }
+
+ if _, ok := c.fields[name]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
+ }
+ c.fields[name] = fieldCodec{
+ path: []int{i},
+ noIndex: opts["noindex"],
+ omitEmpty: opts["omitempty"],
+ structCodec: sub,
+ }
+ }
+ c.complete = true
+ return c, nil
+}
+
+// structPLS adapts a struct to be a PropertyLoadSaver.
+type structPLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+// newStructPLS returns a structPLS, which implements the
+// PropertyLoadSaver interface, for the struct pointer p.
+func newStructPLS(p interface{}) (*structPLS, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidEntityType
+ }
+ v = v.Elem()
+ codec, err := getStructCodec(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return &structPLS{v, codec}, nil
+}
+
+// LoadStruct loads the properties from p to dst.
+// dst must be a struct pointer.
+func LoadStruct(dst interface{}, p []Property) error {
+ x, err := newStructPLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(p)
+}
+
+// SaveStruct returns the properties from src as a slice of Properties.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Property, error) {
+ x, err := newStructPLS(src)
+ if err != nil {
+ return nil, err
+ }
+ return x.Save()
+}
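+
+// Editor's note: a hedged round-trip sketch, not part of the upstream source,
+// showing SaveStruct and LoadStruct together; the Widget type is an assumption.
+//
+//	type Widget struct {
+//		Name  string
+//		Count int64 `datastore:",noindex"`
+//	}
+//
+//	w := Widget{Name: "sprocket", Count: 3}
+//	props, err := SaveStruct(&w) // e.g. []Property{{Name: "Name", Value: "sprocket"}, ...}
+//	if err == nil {
+//		var w2 Widget
+//		err = LoadStruct(&w2, props) // w2 now equals w
+//	}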
diff --git a/vendor/google.golang.org/appengine/datastore/prop_test.go b/vendor/google.golang.org/appengine/datastore/prop_test.go
new file mode 100644
index 000000000..1b42249df
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop_test.go
@@ -0,0 +1,547 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+)
+
+func TestValidPropertyName(t *testing.T) {
+ testCases := []struct {
+ name string
+ want bool
+ }{
+ // Invalid names.
+ {"", false},
+ {"'", false},
+ {".", false},
+ {"..", false},
+ {".foo", false},
+ {"0", false},
+ {"00", false},
+ {"X.X.4.X.X", false},
+ {"\n", false},
+ {"\x00", false},
+ {"abc\xffz", false},
+ {"foo.", false},
+ {"foo..", false},
+ {"foo..bar", false},
+ {"☃", false},
+ {`"`, false},
+ // Valid names.
+ {"AB", true},
+ {"Abc", true},
+ {"X.X.X.X.X", true},
+ {"_", true},
+ {"_0", true},
+ {"a", true},
+ {"a_B", true},
+ {"f00", true},
+ {"f0o", true},
+ {"fo0", true},
+ {"foo", true},
+ {"foo.bar", true},
+ {"foo.bar.baz", true},
+ {"世界", true},
+ }
+ for _, tc := range testCases {
+ got := validPropertyName(tc.name)
+ if got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.name, got, tc.want)
+ }
+ }
+}
+
+func TestStructCodec(t *testing.T) {
+ type oStruct struct {
+ O int
+ }
+ type pStruct struct {
+ P int
+ Q int
+ }
+ type rStruct struct {
+ R int
+ S pStruct
+ T oStruct
+ oStruct
+ }
+ type uStruct struct {
+ U int
+ v int
+ }
+ type vStruct struct {
+ V string `datastore:",noindex"`
+ }
+ oStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "O": {path: []int{0}},
+ },
+ complete: true,
+ }
+ pStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "P": {path: []int{0}},
+ "Q": {path: []int{1}},
+ },
+ complete: true,
+ }
+ rStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "R": {path: []int{0}},
+ "S": {path: []int{1}, structCodec: pStructCodec},
+ "T": {path: []int{2}, structCodec: oStructCodec},
+ "O": {path: []int{3, 0}},
+ },
+ complete: true,
+ }
+ uStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "U": {path: []int{0}},
+ },
+ complete: true,
+ }
+ vStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "V": {path: []int{0}, noIndex: true},
+ },
+ complete: true,
+ }
+
+ testCases := []struct {
+ desc string
+ structValue interface{}
+ want *structCodec
+ }{
+ {
+ "oStruct",
+ oStruct{},
+ oStructCodec,
+ },
+ {
+ "pStruct",
+ pStruct{},
+ pStructCodec,
+ },
+ {
+ "rStruct",
+ rStruct{},
+ rStructCodec,
+ },
+ {
+ "uStruct",
+ uStruct{},
+ uStructCodec,
+ },
+ {
+ "non-basic fields",
+ struct {
+ B appengine.BlobKey
+ K *Key
+ T time.Time
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "B": {path: []int{0}},
+ "K": {path: []int{1}},
+ "T": {path: []int{2}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with ignored embed",
+ struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ oStruct `datastore:"-"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "a": {path: []int{0}, noIndex: true},
+ "b": {path: []int{1}},
+ "C": {path: []int{2}, noIndex: true},
+ "D": {path: []int{3}},
+ "E": {path: []int{4}},
+ "J": {path: []int{6}, noIndex: true},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported fields",
+ struct {
+ A int
+ b int
+ C int `datastore:"x"`
+ d int `datastore:"Y"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}},
+ "x": {path: []int{2}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "nested and embedded structs",
+ struct {
+ A int
+ B int
+ CC oStruct
+ DDD rStruct
+ oStruct
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}},
+ "B": {path: []int{1}},
+ "CC": {path: []int{2}, structCodec: oStructCodec},
+ "DDD": {path: []int{3}, structCodec: rStructCodec},
+ "O": {path: []int{4, 0}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with nested and embedded structs",
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"w"`
+ C oStruct `datastore:"xx"`
+ D rStruct `datastore:"y"`
+ oStruct `datastore:"z"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "w": {path: []int{1}},
+ "xx": {path: []int{2}, structCodec: oStructCodec},
+ "y": {path: []int{3}, structCodec: rStructCodec},
+ "z.O": {path: []int{4, 0}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported nested and embedded structs",
+ struct {
+ a int
+ B int
+ c uStruct
+ D uStruct
+ uStruct
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "B": {path: []int{1}},
+ "D": {path: []int{3}, structCodec: uStructCodec},
+ "U": {path: []int{4, 0}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex nested struct",
+ struct {
+ A oStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}, structCodec: oStructCodec, noIndex: true},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex slice",
+ struct {
+ A []string `datastore:",noindex"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}, noIndex: true},
+ },
+ hasSlice: true,
+ complete: true,
+ },
+ },
+ {
+ "noindex embedded struct slice",
+ struct {
+ // vStruct has a single field, V, also with noindex.
+ A []vStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}, structCodec: vStructCodec, noIndex: true},
+ },
+ hasSlice: true,
+ complete: true,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ got, err := getStructCodec(reflect.TypeOf(tc.structValue))
+ if err != nil {
+ t.Errorf("%s: getStructCodec: %v", tc.desc, err)
+ continue
+ }
+ // We can't use reflect.DeepEqual because element order in the fields map may differ.
+ if !isEqualStructCodec(got, tc.want) {
+ t.Errorf("%s\ngot %+v\nwant %+v\n", tc.desc, got, tc.want)
+ }
+ }
+}
+
+func isEqualStructCodec(got, want *structCodec) bool {
+ if got.complete != want.complete {
+ return false
+ }
+ if got.hasSlice != want.hasSlice {
+ return false
+ }
+ if len(got.fields) != len(want.fields) {
+ return false
+ }
+ for name, wantF := range want.fields {
+ gotF := got.fields[name]
+ if !reflect.DeepEqual(wantF.path, gotF.path) {
+ return false
+ }
+ if wantF.noIndex != gotF.noIndex {
+ return false
+ }
+ if wantF.structCodec != nil {
+ if gotF.structCodec == nil {
+ return false
+ }
+ if !isEqualStructCodec(gotF.structCodec, wantF.structCodec) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+func TestRepeatedPropertyName(t *testing.T) {
+ good := []interface{}{
+ struct {
+ A int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"b"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"B"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"D"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ A int `datastore:"B"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"C"`
+ B int `datastore:"C"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"B"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+func TestFlatteningNestedStructs(t *testing.T) {
+ type DeepGood struct {
+ A struct {
+ B []struct {
+ C struct {
+ D int
+ }
+ }
+ }
+ }
+ type DeepBad struct {
+ A struct {
+ B []struct {
+ C struct {
+ D []int
+ }
+ }
+ }
+ }
+ type ISay struct {
+ Tomato int
+ }
+ type YouSay struct {
+ Tomato int
+ }
+ type Tweedledee struct {
+ Dee int `datastore:"D"`
+ }
+ type Tweedledum struct {
+ Dum int `datastore:"D"`
+ }
+
+ good := []interface{}{
+ struct {
+ X []struct {
+ Y string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []byte
+ }
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ }{},
+ struct {
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ DeepGood
+ }{},
+ struct {
+ DG DeepGood
+ }{},
+ struct {
+ Foo struct {
+ Z int
+ } `datastore:"A"`
+ Bar struct {
+ Z int
+ } `datastore:"B"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ X []struct {
+ Y []string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []int
+ }
+ }{},
+ struct {
+ DeepBad
+ }{},
+ struct {
+ DB DeepBad
+ }{},
+ struct {
+ ISay
+ YouSay
+ }{},
+ struct {
+ Tweedledee
+ Tweedledum
+ }{},
+ struct {
+ Foo struct {
+ Z int
+ } `datastore:"A"`
+ Bar struct {
+ Z int
+ } `datastore:"A"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) {
+ for _, x := range good {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err != nil {
+ t.Errorf("type %T: got non-nil error (%s), want nil", x, err)
+ }
+ }
+ for _, x := range bad {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err == nil {
+ t.Errorf("type %T: got nil error, want non-nil", x)
+ }
+ }
+}
+
+func TestNilKeyIsStored(t *testing.T) {
+ x := struct {
+ K *Key
+ I int
+ }{}
+ p := PropertyList{}
+ // Save x as properties.
+ p1, _ := SaveStruct(&x)
+ p.Load(p1)
+ // Set x's fields to non-zero.
+ x.K = &Key{}
+ x.I = 2
+ // Load x from properties.
+ p2, _ := p.Save()
+ LoadStruct(&x, p2)
+ // Check that x's fields were set to zero.
+ if x.K != nil {
+ t.Errorf("K field was not zero")
+ }
+ if x.I != 0 {
+ t.Errorf("I field was not zero")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 000000000..3847b0fa6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,724 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+type operator int
+
+const (
+ lessThan operator = iota
+ lessEq
+ equal
+ greaterEq
+ greaterThan
+)
+
+var operatorToProto = map[operator]*pb.Query_Filter_Operator{
+ lessThan: pb.Query_Filter_LESS_THAN.Enum(),
+ lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
+ equal: pb.Query_Filter_EQUAL.Enum(),
+ greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
+ greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
+}
+
+// filter is a conditional filter on query results.
+type filter struct {
+ FieldName string
+ Op operator
+ Value interface{}
+}
+
+type sortDirection int
+
+const (
+ ascending sortDirection = iota
+ descending
+)
+
+var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
+ ascending: pb.Query_Order_ASCENDING.Enum(),
+ descending: pb.Query_Order_DESCENDING.Enum(),
+}
+
+// order is a sort order on query results.
+type order struct {
+ FieldName string
+ Direction sortDirection
+}
+
+// NewQuery creates a new Query for a specific entity kind.
+//
+// An empty kind means to return all entities, including entities created and
+// managed by other App Engine features, and is called a kindless query.
+// Kindless queries cannot include filters or sort orders on property values.
+func NewQuery(kind string) *Query {
+ return &Query{
+ kind: kind,
+ limit: -1,
+ }
+}
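+
+// Editor's note: a brief illustrative sketch, not part of the upstream source.
+//
+//	gophers := NewQuery("Gopher") // entities of kind "Gopher" only
+//	all := NewQuery("")           // kindless: every entity in the namespace
+//	_, _ = gophers, all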
+
+// Query represents a datastore query.
+type Query struct {
+ kind string
+ ancestor *Key
+ filter []filter
+ order []order
+ projection []string
+
+ distinct bool
+ keysOnly bool
+ eventual bool
+ limit int32
+ offset int32
+ start *pb.CompiledCursor
+ end *pb.CompiledCursor
+
+ err error
+}
+
+func (q *Query) clone() *Query {
+ x := *q
+ // Copy the contents of the slice-typed fields to a new backing store.
+ if len(q.filter) > 0 {
+ x.filter = make([]filter, len(q.filter))
+ copy(x.filter, q.filter)
+ }
+ if len(q.order) > 0 {
+ x.order = make([]order, len(q.order))
+ copy(x.order, q.order)
+ }
+ return &x
+}
+
+// Ancestor returns a derivative query with an ancestor filter.
+// The ancestor should not be nil.
+func (q *Query) Ancestor(ancestor *Key) *Query {
+ q = q.clone()
+ if ancestor == nil {
+ q.err = errors.New("datastore: nil query ancestor")
+ return q
+ }
+ q.ancestor = ancestor
+ return q
+}
+
+// EventualConsistency returns a derivative query that returns eventually
+// consistent results.
+// It only has an effect on ancestor queries.
+func (q *Query) EventualConsistency() *Query {
+ q = q.clone()
+ q.eventual = true
+ return q
+}
+
+// Filter returns a derivative query with a field-based filter.
+// The filterStr argument must be a field name followed by optional space,
+// followed by an operator, one of ">", "<", ">=", "<=", or "=".
+// Fields are compared against the provided value using the operator.
+// Multiple filters are AND'ed together.
+func (q *Query) Filter(filterStr string, value interface{}) *Query {
+ q = q.clone()
+ filterStr = strings.TrimSpace(filterStr)
+ if len(filterStr) < 1 {
+ q.err = errors.New("datastore: invalid filter: " + filterStr)
+ return q
+ }
+ f := filter{
+ FieldName: strings.TrimRight(filterStr, " ><=!"),
+ Value: value,
+ }
+ switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
+ case "<=":
+ f.Op = lessEq
+ case ">=":
+ f.Op = greaterEq
+ case "<":
+ f.Op = lessThan
+ case ">":
+ f.Op = greaterThan
+ case "=":
+ f.Op = equal
+ default:
+ q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
+ return q
+ }
+ q.filter = append(q.filter, f)
+ return q
+}
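+
+// Editor's note: an illustrative sketch, not part of the upstream source.
+// Successive Filter calls are ANDed together; the field name is an assumption.
+//
+//	q := NewQuery("Gopher").
+//		Filter("Height >=", 5).
+//		Filter("Height <", 10)
+//	_ = q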
+
+// Order returns a derivative query with a field-based sort order. Orders are
+// applied in the order they are added. The default order is ascending; to sort
+// in descending order prefix the fieldName with a minus sign (-).
+func (q *Query) Order(fieldName string) *Query {
+ q = q.clone()
+ fieldName = strings.TrimSpace(fieldName)
+ o := order{
+ Direction: ascending,
+ FieldName: fieldName,
+ }
+ if strings.HasPrefix(fieldName, "-") {
+ o.Direction = descending
+ o.FieldName = strings.TrimSpace(fieldName[1:])
+ } else if strings.HasPrefix(fieldName, "+") {
+ q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
+ return q
+ }
+ if len(o.FieldName) == 0 {
+ q.err = errors.New("datastore: empty order")
+ return q
+ }
+ q.order = append(q.order, o)
+ return q
+}
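+
+// Editor's note: an illustrative sketch, not part of the upstream source,
+// showing ascending and descending orders; field names are assumptions.
+//
+//	q := NewQuery("Gopher").
+//		Order("Height").  // ascending
+//		Order("-Updated") // descending, most recent first
+//	_ = q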
+
+// Project returns a derivative query that yields only the given fields. It
+// cannot be used with KeysOnly.
+func (q *Query) Project(fieldNames ...string) *Query {
+ q = q.clone()
+ q.projection = append([]string(nil), fieldNames...)
+ return q
+}
+
+// Distinct returns a derivative query that yields de-duplicated entities with
+// respect to the set of projected fields. It is only used for projection
+// queries.
+func (q *Query) Distinct() *Query {
+ q = q.clone()
+ q.distinct = true
+ return q
+}
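+
+// Editor's note: an illustrative sketch, not part of the upstream source,
+// combining Project and Distinct into a projection query; field names are
+// assumptions.
+//
+//	q := NewQuery("Gopher").
+//		Project("Name", "Height").
+//		Distinct()
+//	_ = q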
+
+// KeysOnly returns a derivative query that yields only keys, not keys and
+// entities. It cannot be used with projection queries.
+func (q *Query) KeysOnly() *Query {
+ q = q.clone()
+ q.keysOnly = true
+ return q
+}
+
+// Limit returns a derivative query that has a limit on the number of results
+// returned. A negative value means unlimited.
+func (q *Query) Limit(limit int) *Query {
+ q = q.clone()
+ if limit < math.MinInt32 || limit > math.MaxInt32 {
+ q.err = errors.New("datastore: query limit overflow")
+ return q
+ }
+ q.limit = int32(limit)
+ return q
+}
+
+// Offset returns a derivative query that has an offset of how many keys to
+// skip over before returning results. A negative value is invalid.
+func (q *Query) Offset(offset int) *Query {
+ q = q.clone()
+ if offset < 0 {
+ q.err = errors.New("datastore: negative query offset")
+ return q
+ }
+ if offset > math.MaxInt32 {
+ q.err = errors.New("datastore: query offset overflow")
+ return q
+ }
+ q.offset = int32(offset)
+ return q
+}
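+
+// Editor's note: an illustrative sketch, not part of the upstream source.
+// Limit and Offset compose for simple paging, but the running time still grows
+// with the offset, so cursors (see Cursor below) are usually preferable for
+// deep pages.
+//
+//	page := NewQuery("Gopher").
+//		Order("Name").
+//		Offset(20).
+//		Limit(10)
+//	_ = page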
+
+// Start returns a derivative query with the given start point.
+func (q *Query) Start(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.start = c.cc
+ return q
+}
+
+// End returns a derivative query with the given end point.
+func (q *Query) End(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.end = c.cc
+ return q
+}
+
+// toProto converts the query to a protocol buffer.
+func (q *Query) toProto(dst *pb.Query, appID string) error {
+ if len(q.projection) != 0 && q.keysOnly {
+ return errors.New("datastore: query cannot both project and be keys-only")
+ }
+ dst.Reset()
+ dst.App = proto.String(appID)
+ if q.kind != "" {
+ dst.Kind = proto.String(q.kind)
+ }
+ if q.ancestor != nil {
+ dst.Ancestor = keyToProto(appID, q.ancestor)
+ if q.eventual {
+ dst.Strong = proto.Bool(false)
+ }
+ }
+ if q.projection != nil {
+ dst.PropertyName = q.projection
+ if q.distinct {
+ dst.GroupByPropertyName = q.projection
+ }
+ }
+ if q.keysOnly {
+ dst.KeysOnly = proto.Bool(true)
+ dst.RequirePerfectPlan = proto.Bool(true)
+ }
+ for _, qf := range q.filter {
+ if qf.FieldName == "" {
+ return errors.New("datastore: empty query filter field name")
+ }
+ p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
+ if errStr != "" {
+ return errors.New("datastore: bad query filter value type: " + errStr)
+ }
+ xf := &pb.Query_Filter{
+ Op: operatorToProto[qf.Op],
+ Property: []*pb.Property{p},
+ }
+ if xf.Op == nil {
+ return errors.New("datastore: unknown query filter operator")
+ }
+ dst.Filter = append(dst.Filter, xf)
+ }
+ for _, qo := range q.order {
+ if qo.FieldName == "" {
+ return errors.New("datastore: empty query order field name")
+ }
+ xo := &pb.Query_Order{
+ Property: proto.String(qo.FieldName),
+ Direction: sortDirectionToProto[qo.Direction],
+ }
+ if xo.Direction == nil {
+ return errors.New("datastore: unknown query order direction")
+ }
+ dst.Order = append(dst.Order, xo)
+ }
+ if q.limit >= 0 {
+ dst.Limit = proto.Int32(q.limit)
+ }
+ if q.offset != 0 {
+ dst.Offset = proto.Int32(q.offset)
+ }
+ dst.CompiledCursor = q.start
+ dst.EndCompiledCursor = q.end
+ dst.Compile = proto.Bool(true)
+ return nil
+}
+
+// Count returns the number of results for the query.
+//
+// The running time and number of API calls made by Count scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise Count will
+// continue until it finishes counting or the provided context expires.
+func (q *Query) Count(c context.Context) (int, error) {
+ // Check that the query is well-formed.
+ if q.err != nil {
+ return 0, q.err
+ }
+
+ // Run a copy of the query, with keysOnly true (if we're not a projection,
+ // since the two are incompatible), and an adjusted offset. We also set the
+ // limit to zero, as we don't want any actual entity data, just the number
+ // of skipped results.
+ newQ := q.clone()
+ newQ.keysOnly = len(newQ.projection) == 0
+ newQ.limit = 0
+ if q.limit < 0 {
+ // If the original query was unlimited, set the new query's offset to maximum.
+ newQ.offset = math.MaxInt32
+ } else {
+ newQ.offset = q.offset + q.limit
+ if newQ.offset < 0 {
+ // Do the best we can, in the presence of overflow.
+ newQ.offset = math.MaxInt32
+ }
+ }
+ req := &pb.Query{}
+ if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
+ return 0, err
+ }
+ res := &pb.QueryResult{}
+ if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
+ return 0, err
+ }
+
+ // n is the count we will return. For example, suppose that our original
+ // query had an offset of 4 and a limit of 2008: the count will be 2008,
+ // provided that there are at least 2012 matching entities. However, the
+ // RPCs will only skip 1000 results at a time. The RPC sequence is:
+ // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 1000
+ // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 2000
+ // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (12, false)
+ // n += 12 // n == 2012
+ // // exit the loop
+ // n -= 4 // n == 2008
+ var n int32
+ for {
+ // The QueryResult should have no actual entity data, just skipped results.
+ if len(res.Result) != 0 {
+ return 0, errors.New("datastore: internal error: Count request returned too much data")
+ }
+ n += res.GetSkippedResults()
+ if !res.GetMoreResults() {
+ break
+ }
+ if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+ return 0, err
+ }
+ }
+ n -= q.offset
+ if n < 0 {
+ // If the offset was greater than the number of matching entities,
+ // return 0 instead of negative.
+ n = 0
+ }
+ return int(n), nil
+}
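+
+// Editor's note: a hedged usage sketch, not part of the upstream source; ctx
+// is assumed to come from appengine.NewContext. Limiting the query keeps the
+// count cheap, per the note above.
+//
+//	n, err := NewQuery("Gopher").Limit(1000).Count(ctx)
+//	if err == nil && n == 1000 {
+//		// there are at least 1000 matching entities
+//	}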
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+ if res.Cursor == nil {
+ return errors.New("datastore: internal error: server did not return a cursor")
+ }
+ req := &pb.NextRequest{
+ Cursor: res.Cursor,
+ }
+ if limit >= 0 {
+ req.Count = proto.Int32(limit)
+ }
+ if offset != 0 {
+ req.Offset = proto.Int32(offset)
+ }
+ if res.CompiledCursor != nil {
+ req.Compile = proto.Bool(true)
+ }
+ res.Reset()
+ return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
+ var (
+ dv reflect.Value
+ mat multiArgType
+ elemType reflect.Type
+ errFieldMismatch error
+ )
+ if !q.keysOnly {
+ dv = reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return nil, ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType = checkMultiArg(dv)
+ if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+ return nil, ErrInvalidEntityType
+ }
+ }
+
+ var keys []*Key
+ for t := q.Run(c); ; {
+ k, e, err := t.next()
+ if err == Done {
+ break
+ }
+ if err != nil {
+ return keys, err
+ }
+ if !q.keysOnly {
+ ev := reflect.New(elemType)
+ if elemType.Kind() == reflect.Map {
+ // This is a special case. The zero values of a map type are
+ // not immediately useful; they have to be make'd.
+ //
+ // Funcs and channels are similar, in that a zero value is not useful,
+ // but even a freshly make'd channel isn't useful: there's no fixed
+ // channel buffer size that is always going to be large enough, and
+ // there's no goroutine to drain the other end. Theoretically, these
+ // types could be supported, for example by sniffing for a constructor
+ // method or requiring prior registration, but for now it's not a
+ // frequent enough concern to be worth it. Programmers can work around
+ // it by explicitly using Iterator.Next instead of the Query.GetAll
+ // convenience method.
+ x := reflect.MakeMap(elemType)
+ ev.Elem().Set(x)
+ }
+ if err = loadEntity(ev.Interface(), e); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return keys, err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ }
+ keys = append(keys, k)
+ }
+ return keys, errFieldMismatch
+}
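+
+// Editor's note: an illustrative sketch, not part of the upstream source, of
+// the common GetAll pattern; the Gopher type and ctx are assumptions.
+//
+//	var gophers []Gopher
+//	keys, err := NewQuery("Gopher").Limit(100).GetAll(ctx, &gophers)
+//	if err != nil {
+//		// An *ErrFieldMismatch can often be ignored (see above); any other
+//		// error should be handled.
+//	}
+//	for i := range keys {
+//		_ = keys[i]    // keys[i] corresponds to gophers[i]
+//		_ = gophers[i]
+//	}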
+
+// Run runs the query in the given context.
+func (q *Query) Run(c context.Context) *Iterator {
+ if q.err != nil {
+ return &Iterator{err: q.err}
+ }
+ t := &Iterator{
+ c: c,
+ limit: q.limit,
+ q: q,
+ prevCC: q.start,
+ }
+ var req pb.Query
+ if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
+ t.err = err
+ return t
+ }
+ if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
+ t.err = err
+ return t
+ }
+ offset := q.offset - t.res.GetSkippedResults()
+ for offset > 0 && t.res.GetMoreResults() {
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
+ t.err = err
+ break
+ }
+ skip := t.res.GetSkippedResults()
+ if skip < 0 {
+ t.err = errors.New("datastore: internal error: negative number of skipped_results")
+ break
+ }
+ offset -= skip
+ }
+ if offset < 0 {
+ t.err = errors.New("datastore: internal error: query offset was overshot")
+ }
+ return t
+}
+
+// Iterator is the result of running a query.
+type Iterator struct {
+ c context.Context
+ err error
+ // res is the result of the most recent RunQuery or Next API call.
+ res pb.QueryResult
+ // i is how many elements of res.Result we have iterated over.
+ i int
+ // limit is the limit on the number of results this iterator should return.
+ // A negative value means unlimited.
+ limit int32
+ // q is the original query which yielded this iterator.
+ q *Query
+ // prevCC is the compiled cursor that marks the end of the previous batch
+ // of results.
+ prevCC *pb.CompiledCursor
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+ k, e, err := t.next()
+ if err != nil {
+ return nil, err
+ }
+ if dst != nil && !t.q.keysOnly {
+ err = loadEntity(dst, e)
+ }
+ return k, err
+}
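+
+// Editor's note: an illustrative sketch, not part of the upstream source, of
+// the canonical iteration loop; the Gopher type and ctx are assumptions.
+//
+//	t := NewQuery("Gopher").Run(ctx)
+//	for {
+//		var g Gopher
+//		key, err := t.Next(&g)
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			break // handle error
+//		}
+//		_ = key
+//	}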
+
+func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
+ if t.err != nil {
+ return nil, nil, t.err
+ }
+
+ // Issue datastore_v3/Next RPCs as necessary.
+ for t.i == len(t.res.Result) {
+ if !t.res.GetMoreResults() {
+ t.err = Done
+ return nil, nil, t.err
+ }
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
+ t.err = err
+ return nil, nil, t.err
+ }
+ if t.res.GetSkippedResults() != 0 {
+ t.err = errors.New("datastore: internal error: iterator has skipped results")
+ return nil, nil, t.err
+ }
+ t.i = 0
+ if t.limit >= 0 {
+ t.limit -= int32(len(t.res.Result))
+ if t.limit < 0 {
+ t.err = errors.New("datastore: internal error: query returned more results than the limit")
+ return nil, nil, t.err
+ }
+ }
+ }
+
+ // Extract the key from the t.i'th element of t.res.Result.
+ e := t.res.Result[t.i]
+ t.i++
+ if e.Key == nil {
+ return nil, nil, errors.New("datastore: internal error: server did not return a key")
+ }
+ k, err := protoToKey(e.Key)
+ if err != nil || k.Incomplete() {
+ return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ return k, e, nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+ if t.err != nil && t.err != Done {
+ return Cursor{}, t.err
+ }
+ // If we are at either end of the current batch of results,
+ // return the compiled cursor at that end.
+ skipped := t.res.GetSkippedResults()
+ if t.i == 0 && skipped == 0 {
+ if t.prevCC == nil {
+ // A nil pointer (of type *pb.CompiledCursor) means no constraint:
+ // passing it as the end cursor of a new query means unlimited results
+ // (glossing over the integer limit parameter for now).
+ // A non-nil pointer to an empty pb.CompiledCursor means the start:
+ // passing it as the end cursor of a new query means 0 results.
+ // If prevCC was nil, then the original query had no start cursor, but
+ // Iterator.Cursor should return "the start" instead of unlimited.
+ return Cursor{&zeroCC}, nil
+ }
+ return Cursor{t.prevCC}, nil
+ }
+ if t.i == len(t.res.Result) {
+ return Cursor{t.res.CompiledCursor}, nil
+ }
+ // Otherwise, re-run the query, offset to this iterator's position, starting from
+ // the most recent compiled cursor. This is done on a best-effort basis, as it
+ // is racy; if a concurrent process has added or removed entities, then the
+ // cursor returned may be inconsistent.
+ q := t.q.clone()
+ q.start = t.prevCC
+ q.offset = skipped + int32(t.i)
+ q.limit = 0
+ q.keysOnly = len(q.projection) == 0
+ t1 := q.Run(t.c)
+ _, _, err := t1.next()
+ if err != Done {
+ if err == nil {
+ err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+ }
+ return Cursor{}, err
+ }
+ return Cursor{t1.res.CompiledCursor}, nil
+}
+
+var zeroCC pb.CompiledCursor
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+ cc *pb.CompiledCursor
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+ if c.cc == nil {
+ return ""
+ }
+ b, err := proto.Marshal(c.cc)
+ if err != nil {
+ // The only way to construct a Cursor with a non-nil cc field is to
+ // unmarshal from the byte representation. We panic if the unmarshal
+ // succeeds but the marshaling of the unchanged protobuf value fails.
+ panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+ }
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+ if s == "" {
+ return Cursor{&zeroCC}, nil
+ }
+ if n := len(s) % 4; n != 0 {
+ s += strings.Repeat("=", 4-n)
+ }
+ b, err := base64.URLEncoding.DecodeString(s)
+ if err != nil {
+ return Cursor{}, err
+ }
+ cc := &pb.CompiledCursor{}
+ if err := proto.Unmarshal(b, cc); err != nil {
+ return Cursor{}, err
+ }
+ return Cursor{cc}, nil
+}
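+
+// Illustrative, client-side sketch of a cursor round trip: capture the
+// iterator's position, store it as a string, and later resume the same query
+// from it via Query.Start; q and t are placeholders.
+//
+//	cursor, err := t.Cursor()
+//	if err != nil {
+//		return err
+//	}
+//	saved := cursor.String()
+//	// ... later, possibly in a different request ...
+//	decoded, err := datastore.DecodeCursor(saved)
+//	if err != nil {
+//		return err
+//	}
+//	it := q.Start(decoded).Run(c)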
diff --git a/vendor/google.golang.org/appengine/datastore/query_test.go b/vendor/google.golang.org/appengine/datastore/query_test.go
new file mode 100644
index 000000000..f1b9de87f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query_test.go
@@ -0,0 +1,583 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ path1 = &pb.Path{
+ Element: []*pb.Path_Element{
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(6),
+ },
+ },
+ }
+ path2 = &pb.Path{
+ Element: []*pb.Path_Element{
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(6),
+ },
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(8),
+ },
+ },
+ }
+)
+
+func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error {
+ expectedIn := &pb.Query{
+ App: proto.String("dev~fake-app"),
+ Kind: proto.String("Gopher"),
+ Compile: proto.Bool(true),
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
+ }
+ *out = pb.QueryResult{
+ Result: []*pb.EntityProto{
+ {
+ Key: &pb.Reference{
+ App: proto.String("s~test-app"),
+ Path: path1,
+ },
+ EntityGroup: path1,
+ Property: []*pb.Property{
+ {
+ Meaning: pb.Property_TEXT.Enum(),
+ Name: proto.String("Name"),
+ Value: &pb.PropertyValue{
+ StringValue: proto.String("George"),
+ },
+ },
+ {
+ Name: proto.String("Height"),
+ Value: &pb.PropertyValue{
+ Int64Value: proto.Int64(32),
+ },
+ },
+ },
+ },
+ {
+ Key: &pb.Reference{
+ App: proto.String("s~test-app"),
+ Path: path2,
+ },
+ EntityGroup: path1, // ancestor is George
+ Property: []*pb.Property{
+ {
+ Meaning: pb.Property_TEXT.Enum(),
+ Name: proto.String("Name"),
+ Value: &pb.PropertyValue{
+ StringValue: proto.String("Rufus"),
+ },
+ },
+ // No height for Rufus.
+ },
+ },
+ },
+ MoreResults: proto.Bool(false),
+ }
+ return nil
+}
+
+type StructThatImplementsPLS struct{}
+
+func (StructThatImplementsPLS) Load(p []Property) error { return nil }
+func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
+
+var _ PropertyLoadSaver = StructThatImplementsPLS{}
+
+type StructPtrThatImplementsPLS struct{}
+
+func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
+func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
+
+var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
+
+type PropertyMap map[string]Property
+
+func (m PropertyMap) Load(props []Property) error {
+ for _, p := range props {
+ if p.Multiple {
+ return errors.New("PropertyMap does not support multiple properties")
+ }
+ m[p.Name] = p
+ }
+ return nil
+}
+
+func (m PropertyMap) Save() ([]Property, error) {
+ props := make([]Property, 0, len(m))
+ for _, p := range m {
+ if p.Multiple {
+ return nil, errors.New("PropertyMap does not support multiple properties")
+ }
+ props = append(props, p)
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = PropertyMap{}
+
+type Gopher struct {
+ Name string
+ Height int
+}
+
+// typeOfEmptyInterface is the type of interface{}, but we can't use
+// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an
+// interface{}.
+var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
+
+func TestCheckMultiArg(t *testing.T) {
+ testCases := []struct {
+ v interface{}
+ mat multiArgType
+ elemType reflect.Type
+ }{
+ // Invalid cases.
+ {nil, multiArgTypeInvalid, nil},
+ {Gopher{}, multiArgTypeInvalid, nil},
+ {&Gopher{}, multiArgTypeInvalid, nil},
+ {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
+ {PropertyMap{}, multiArgTypeInvalid, nil},
+ {[]*PropertyList(nil), multiArgTypeInvalid, nil},
+ {[]*PropertyMap(nil), multiArgTypeInvalid, nil},
+ {[]**Gopher(nil), multiArgTypeInvalid, nil},
+ {[]*interface{}(nil), multiArgTypeInvalid, nil},
+ // Valid cases.
+ {
+ []PropertyList(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(PropertyList{}),
+ },
+ {
+ []PropertyMap(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(PropertyMap{}),
+ },
+ {
+ []StructThatImplementsPLS(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(StructThatImplementsPLS{}),
+ },
+ {
+ []StructPtrThatImplementsPLS(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(StructPtrThatImplementsPLS{}),
+ },
+ {
+ []Gopher(nil),
+ multiArgTypeStruct,
+ reflect.TypeOf(Gopher{}),
+ },
+ {
+ []*Gopher(nil),
+ multiArgTypeStructPtr,
+ reflect.TypeOf(Gopher{}),
+ },
+ {
+ []interface{}(nil),
+ multiArgTypeInterface,
+ typeOfEmptyInterface,
+ },
+ }
+ for _, tc := range testCases {
+ mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
+ if mat != tc.mat || elemType != tc.elemType {
+ t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
+ tc.v, mat, elemType, tc.mat, tc.elemType)
+ }
+ }
+}
+
+func TestSimpleQuery(t *testing.T) {
+ struct1 := Gopher{Name: "George", Height: 32}
+ struct2 := Gopher{Name: "Rufus"}
+ pList1 := PropertyList{
+ {
+ Name: "Name",
+ Value: "George",
+ },
+ {
+ Name: "Height",
+ Value: int64(32),
+ },
+ }
+ pList2 := PropertyList{
+ {
+ Name: "Name",
+ Value: "Rufus",
+ },
+ }
+ pMap1 := PropertyMap{
+ "Name": Property{
+ Name: "Name",
+ Value: "George",
+ },
+ "Height": Property{
+ Name: "Height",
+ Value: int64(32),
+ },
+ }
+ pMap2 := PropertyMap{
+ "Name": Property{
+ Name: "Name",
+ Value: "Rufus",
+ },
+ }
+
+ testCases := []struct {
+ dst interface{}
+ want interface{}
+ }{
+ // The destination must have type *[]P, *[]S or *[]*S, for some non-interface
+ // type P such that *P implements PropertyLoadSaver, or for some struct type S.
+ {new([]Gopher), &[]Gopher{struct1, struct2}},
+ {new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
+ {new([]PropertyList), &[]PropertyList{pList1, pList2}},
+ {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},
+
+ // Any other destination type is invalid.
+ {0, nil},
+ {Gopher{}, nil},
+ {PropertyList{}, nil},
+ {PropertyMap{}, nil},
+ {[]int{}, nil},
+ {[]Gopher{}, nil},
+ {[]PropertyList{}, nil},
+ {new(int), nil},
+ {new(Gopher), nil},
+ {new(PropertyList), nil}, // This is a special case.
+ {new(PropertyMap), nil},
+ {new([]int), nil},
+ {new([]map[int]int), nil},
+ {new([]map[string]Property), nil},
+ {new([]map[string]interface{}), nil},
+ {new([]*int), nil},
+ {new([]*map[int]int), nil},
+ {new([]*map[string]Property), nil},
+ {new([]*map[string]interface{}), nil},
+ {new([]**Gopher), nil},
+ {new([]*PropertyList), nil},
+ {new([]*PropertyMap), nil},
+ }
+ for _, tc := range testCases {
+ nCall := 0
+ c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+ nCall++
+ return fakeRunQuery(in, out)
+ })
+ c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+ var (
+ expectedErr error
+ expectedNCall int
+ )
+ if tc.want == nil {
+ expectedErr = ErrInvalidEntityType
+ } else {
+ expectedNCall = 1
+ }
+ keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
+ if err != expectedErr {
+ t.Errorf("dst type %T: got error [%v], want [%v]", tc.dst, err, expectedErr)
+ continue
+ }
+ if nCall != expectedNCall {
+ t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
+ continue
+ }
+ if err != nil {
+ continue
+ }
+
+ key1 := NewKey(c, "Gopher", "", 6, nil)
+ expectedKeys := []*Key{
+ key1,
+ NewKey(c, "Gopher", "", 8, key1),
+ }
+ if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
+ t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
+ continue
+ }
+ for i, key := range keys {
+ if key.AppID() != "s~test-app" {
+ t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
+ continue
+ }
+ if !keysEqual(key, expectedKeys[i]) {
+ t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
+ continue
+ }
+ }
+
+ if !reflect.DeepEqual(tc.dst, tc.want) {
+ t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
+ continue
+ }
+ }
+}
+
+// keysEqual is like (*Key).Equal, but ignores the App ID.
+func keysEqual(a, b *Key) bool {
+ for a != nil && b != nil {
+ if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
+ return false
+ }
+ a, b = a.Parent(), b.Parent()
+ }
+ return a == b
+}
+
+func TestQueriesAreImmutable(t *testing.T) {
+ // Test that deriving q2 from q1 does not modify q1.
+ q0 := NewQuery("foo")
+ q1 := NewQuery("foo")
+ q2 := q1.Offset(2)
+ if !reflect.DeepEqual(q0, q1) {
+ t.Errorf("q0 and q1 were not equal")
+ }
+ if reflect.DeepEqual(q1, q2) {
+ t.Errorf("q1 and q2 were equal")
+ }
+
+ // Test that deriving from q4 twice does not conflict, even though
+ // q4 has a long list of order clauses. This tests that the arrays
+ // backed by a query's slice of orders are not shared.
+ f := func() *Query {
+ q := NewQuery("bar")
+ // 47 is an ugly number that is unlikely to be near a re-allocation
+ // point in repeated append calls. For example, it's not near a power
+ // of 2 or a multiple of 10.
+ for i := 0; i < 47; i++ {
+ q = q.Order(fmt.Sprintf("x%d", i))
+ }
+ return q
+ }
+ q3 := f().Order("y")
+ q4 := f()
+ q5 := q4.Order("y")
+ q6 := q4.Order("z")
+ if !reflect.DeepEqual(q3, q5) {
+ t.Errorf("q3 and q5 were not equal")
+ }
+ if reflect.DeepEqual(q5, q6) {
+ t.Errorf("q5 and q6 were equal")
+ }
+}
+
+func TestFilterParser(t *testing.T) {
+ testCases := []struct {
+ filterStr string
+ wantOK bool
+ wantFieldName string
+ wantOp operator
+ }{
+ // Supported ops.
+ {"x<", true, "x", lessThan},
+ {"x <", true, "x", lessThan},
+ {"x  <", true, "x", lessThan},
+ {" x  <  ", true, "x", lessThan},
+ {"x <=", true, "x", lessEq},
+ {"x =", true, "x", equal},
+ {"x >=", true, "x", greaterEq},
+ {"x >", true, "x", greaterThan},
+ {"in >", true, "in", greaterThan},
+ {"in>", true, "in", greaterThan},
+ // Valid but (currently) unsupported ops.
+ {"x!=", false, "", 0},
+ {"x !=", false, "", 0},
+ {" x  !=  ", false, "", 0},
+ {"x IN", false, "", 0},
+ {"x in", false, "", 0},
+ // Invalid ops.
+ {"x EQ", false, "", 0},
+ {"x lt", false, "", 0},
+ {"x <>", false, "", 0},
+ {"x >>", false, "", 0},
+ {"x ==", false, "", 0},
+ {"x =<", false, "", 0},
+ {"x =>", false, "", 0},
+ {"x !", false, "", 0},
+ {"x ", false, "", 0},
+ {"x", false, "", 0},
+ }
+ for _, tc := range testCases {
+ q := NewQuery("foo").Filter(tc.filterStr, 42)
+ if ok := q.err == nil; ok != tc.wantOK {
+ t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
+ continue
+ }
+ if !tc.wantOK {
+ continue
+ }
+ if len(q.filter) != 1 {
+ t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
+ continue
+ }
+ got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
+ if got != want {
+ t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
+ continue
+ }
+ }
+}
+
+func TestQueryToProto(t *testing.T) {
+ // The context is required to make Keys for the test cases.
+ var got *pb.Query
+ NoErr := errors.New("No error")
+ c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+ got = in
+ return NoErr // return a non-nil error so Run doesn't keep going.
+ })
+ c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.Query
+ err string
+ }{
+ {
+ desc: "empty",
+ query: NewQuery(""),
+ want: &pb.Query{},
+ },
+ {
+ desc: "standard query",
+ query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42),
+ want: &pb.Query{
+ Kind: proto.String("kind"),
+ Filter: []*pb.Query_Filter{
+ {
+ Op: pb.Query_Filter_GREATER_THAN.Enum(),
+ Property: []*pb.Property{
+ {
+ Name: proto.String("I"),
+ Value: &pb.PropertyValue{Int64Value: proto.Int64(17)},
+ Multiple: proto.Bool(false),
+ },
+ },
+ },
+ {
+ Op: pb.Query_Filter_EQUAL.Enum(),
+ Property: []*pb.Property{
+ {
+ Name: proto.String("U"),
+ Value: &pb.PropertyValue{StringValue: proto.String("Dave")},
+ Multiple: proto.Bool(false),
+ },
+ },
+ },
+ },
+ Order: []*pb.Query_Order{
+ {
+ Property: proto.String("I"),
+ Direction: pb.Query_Order_DESCENDING.Enum(),
+ },
+ },
+ Limit: proto.Int32(7),
+ Offset: proto.Int32(42),
+ },
+ },
+ {
+ desc: "ancestor",
+ query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)),
+ want: &pb.Query{
+ Ancestor: &pb.Reference{
+ App: proto.String("dev~fake-app"),
+ Path: &pb.Path{
+ Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}},
+ },
+ },
+ },
+ },
+ {
+ desc: "projection",
+ query: NewQuery("").Project("A", "B"),
+ want: &pb.Query{
+ PropertyName: []string{"A", "B"},
+ },
+ },
+ {
+ desc: "projection with distinct",
+ query: NewQuery("").Project("A", "B").Distinct(),
+ want: &pb.Query{
+ PropertyName: []string{"A", "B"},
+ GroupByPropertyName: []string{"A", "B"},
+ },
+ },
+ {
+ desc: "keys only",
+ query: NewQuery("").KeysOnly(),
+ want: &pb.Query{
+ KeysOnly: proto.Bool(true),
+ RequirePerfectPlan: proto.Bool(true),
+ },
+ },
+ {
+ desc: "empty filter",
+ query: NewQuery("kind").Filter("=", 17),
+ err: "empty query filter field nam",
+ },
+ {
+ desc: "bad filter type",
+ query: NewQuery("kind").Filter("M =", map[string]bool{}),
+ err: "bad query filter value type",
+ },
+ {
+ desc: "bad filter operator",
+ query: NewQuery("kind").Filter("I <<=", 17),
+ err: `invalid operator "<<=" in filter "I <<="`,
+ },
+ {
+ desc: "empty order",
+ query: NewQuery("kind").Order(""),
+ err: "empty order",
+ },
+ {
+ desc: "bad order direction",
+ query: NewQuery("kind").Order("+I"),
+ err: `invalid order: "+I`,
+ },
+ }
+
+ for _, tt := range testCases {
+ got = nil
+ if _, err := tt.query.Run(c).Next(nil); err != NoErr {
+ if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
+ }
+ continue
+ }
+ if tt.err != "" {
+ t.Errorf("%s: no error, want %q", tt.desc, tt.err)
+ continue
+ }
+ // Fields that are common to all protos.
+ tt.want.App = proto.String("dev~fake-app")
+ tt.want.Compile = proto.Bool(true)
+ if !proto.Equal(got, tt.want) {
+ t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 000000000..728d4ca0c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,327 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+ // We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+ // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+ // be represented in the numerator of a single int64 divide.
+ return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+ return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
+}
+
+var (
+ minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
+ maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
+)
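+
+// Illustrative sketch of the ranges involved (the date below is only an
+// example): a time near the year 10000 is roughly 2.5e17 microseconds from
+// 1970, comfortably inside int64, whereas the same span in nanoseconds
+// (roughly 2.5e20) would overflow, which is why toUnixMicro above avoids
+// t.UnixNano().
+//
+//	far := time.Date(9999, 1, 1, 0, 0, 0, 0, time.UTC)
+//	micros := toUnixMicro(far) // ≈ 2.5e17, representable as int64
+//	_ = micros                 // far.UnixNano() would overflow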
+
+// valueToProto converts a named value to a newly allocated Property.
+// The returned error string is empty on success.
+func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
+ var (
+ pv pb.PropertyValue
+ unsupported bool
+ )
+ switch v.Kind() {
+ case reflect.Invalid:
+ // No-op.
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ pv.Int64Value = proto.Int64(v.Int())
+ case reflect.Bool:
+ pv.BooleanValue = proto.Bool(v.Bool())
+ case reflect.String:
+ pv.StringValue = proto.String(v.String())
+ case reflect.Float32, reflect.Float64:
+ pv.DoubleValue = proto.Float64(v.Float())
+ case reflect.Ptr:
+ if k, ok := v.Interface().(*Key); ok {
+ if k != nil {
+ pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
+ }
+ } else {
+ unsupported = true
+ }
+ case reflect.Struct:
+ switch t := v.Interface().(type) {
+ case time.Time:
+ if t.Before(minTime) || t.After(maxTime) {
+ return nil, "time value out of range"
+ }
+ pv.Int64Value = proto.Int64(toUnixMicro(t))
+ case appengine.GeoPoint:
+ if !t.Valid() {
+ return nil, "invalid GeoPoint value"
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
+ default:
+ unsupported = true
+ }
+ case reflect.Slice:
+ if b, ok := v.Interface().([]byte); ok {
+ pv.StringValue = proto.String(string(b))
+ } else {
+ // nvToProto should already catch slice values.
+ // If we get here, we have a slice of slice values.
+ unsupported = true
+ }
+ default:
+ unsupported = true
+ }
+ if unsupported {
+ return nil, "unsupported datastore value type: " + v.Type().String()
+ }
+ p = &pb.Property{
+ Name: proto.String(name),
+ Value: &pv,
+ Multiple: proto.Bool(multiple),
+ }
+ if v.IsValid() {
+ switch v.Interface().(type) {
+ case []byte:
+ p.Meaning = pb.Property_BLOB.Enum()
+ case ByteString:
+ p.Meaning = pb.Property_BYTESTRING.Enum()
+ case appengine.BlobKey:
+ p.Meaning = pb.Property_BLOBKEY.Enum()
+ case time.Time:
+ p.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.GeoPoint:
+ p.Meaning = pb.Property_GEORSS_POINT.Enum()
+ }
+ }
+ return p, ""
+}
+
+type saveOpts struct {
+ noIndex bool
+ multiple bool
+ omitEmpty bool
+}
+
+// saveEntity converts a PropertyLoadSaver or struct pointer into an EntityProto.
+func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
+ var err error
+ var props []Property
+ if e, ok := src.(PropertyLoadSaver); ok {
+ props, err = e.Save()
+ } else {
+ props, err = SaveStruct(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return propertiesToProto(defaultAppID, key, props)
+}
+
+func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
+ if opts.omitEmpty && isEmptyValue(v) {
+ return nil
+ }
+ p := Property{
+ Name: name,
+ NoIndex: opts.noIndex,
+ Multiple: opts.multiple,
+ }
+ switch x := v.Interface().(type) {
+ case *Key:
+ p.Value = x
+ case time.Time:
+ p.Value = x
+ case appengine.BlobKey:
+ p.Value = x
+ case appengine.GeoPoint:
+ p.Value = x
+ case ByteString:
+ p.Value = x
+ default:
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.Value = v.Int()
+ case reflect.Bool:
+ p.Value = v.Bool()
+ case reflect.String:
+ p.Value = v.String()
+ case reflect.Float32, reflect.Float64:
+ p.Value = v.Float()
+ case reflect.Slice:
+ if v.Type().Elem().Kind() == reflect.Uint8 {
+ p.NoIndex = true
+ p.Value = v.Bytes()
+ }
+ case reflect.Struct:
+ if !v.CanAddr() {
+ return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+ }
+ sub, err := newStructPLS(v.Addr().Interface())
+ if err != nil {
+ return fmt.Errorf("datastore: unsupported struct field: %v", err)
+ }
+ return sub.save(props, name+".", opts)
+ }
+ }
+ if p.Value == nil {
+ return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+ }
+ *props = append(*props, p)
+ return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+ var props []Property
+ if err := s.save(&props, "", saveOpts{}); err != nil {
+ return nil, err
+ }
+ return props, nil
+}
+
+func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error {
+ for name, f := range s.codec.fields {
+ name = prefix + name
+ v := s.v.FieldByIndex(f.path)
+ if !v.IsValid() || !v.CanSet() {
+ continue
+ }
+ var opts1 saveOpts
+ opts1.noIndex = opts.noIndex || f.noIndex
+ opts1.multiple = opts.multiple
+ opts1.omitEmpty = f.omitEmpty // don't propagate
+ // For slice fields that aren't []byte, save each element.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ opts1.multiple = true
+ for j := 0; j < v.Len(); j++ {
+ if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ // Otherwise, save the field itself.
+ if err := saveStructProperty(props, name, opts1, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
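+
+// Illustrative sketch of the struct tags that feed these options (Widget is a
+// placeholder; the "omitempty" option is assumed to be recognized by this
+// package's field codec, which is what saveOpts.omitEmpty reflects):
+//
+//	type Widget struct {
+//		Name  string
+//		Blob  []byte   `datastore:",noindex"`
+//		Tags  []string // a non-[]byte slice is saved as multiple "Tags" properties
+//		Notes string   `datastore:",omitempty"`
+//	}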
+
+func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
+ e := &pb.EntityProto{
+ Key: keyToProto(defaultAppID, key),
+ }
+ if key.parent == nil {
+ e.EntityGroup = &pb.Path{}
+ } else {
+ e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
+ }
+ prevMultiple := make(map[string]bool)
+
+ for _, p := range props {
+ if pm, ok := prevMultiple[p.Name]; ok {
+ if !pm || !p.Multiple {
+ return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
+ }
+ } else {
+ prevMultiple[p.Name] = p.Multiple
+ }
+
+ x := &pb.Property{
+ Name: proto.String(p.Name),
+ Value: new(pb.PropertyValue),
+ Multiple: proto.Bool(p.Multiple),
+ }
+ switch v := p.Value.(type) {
+ case int64:
+ x.Value.Int64Value = proto.Int64(v)
+ case bool:
+ x.Value.BooleanValue = proto.Bool(v)
+ case string:
+ x.Value.StringValue = proto.String(v)
+ if p.NoIndex {
+ x.Meaning = pb.Property_TEXT.Enum()
+ }
+ case float64:
+ x.Value.DoubleValue = proto.Float64(v)
+ case *Key:
+ if v != nil {
+ x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
+ }
+ case time.Time:
+ if v.Before(minTime) || v.After(maxTime) {
+ return nil, fmt.Errorf("datastore: time value out of range")
+ }
+ x.Value.Int64Value = proto.Int64(toUnixMicro(v))
+ x.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.BlobKey:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOBKEY.Enum()
+ case appengine.GeoPoint:
+ if !v.Valid() {
+ return nil, fmt.Errorf("datastore: invalid GeoPoint value")
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
+ x.Meaning = pb.Property_GEORSS_POINT.Enum()
+ case []byte:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOB.Enum()
+ if !p.NoIndex {
+ return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
+ }
+ case ByteString:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BYTESTRING.Enum()
+ default:
+ if p.Value != nil {
+ return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
+ }
+ }
+
+ if p.NoIndex {
+ e.RawProperty = append(e.RawProperty, x)
+ } else {
+ e.Property = append(e.Property, x)
+ if len(e.Property) > maxIndexedProperties {
+ return nil, errors.New("datastore: too many indexed properties")
+ }
+ }
+ }
+ return e, nil
+}
+
+// isEmptyValue is taken from the encoding/json package in the
+// standard library.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
diff --git a/vendor/google.golang.org/appengine/datastore/time_test.go b/vendor/google.golang.org/appengine/datastore/time_test.go
new file mode 100644
index 000000000..ba74b449e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/time_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "testing"
+ "time"
+)
+
+func TestUnixMicro(t *testing.T) {
+ // Test that all these time.Time values survive a round trip to unix micros.
+ testCases := []time.Time{
+ {},
+ time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Unix(-1e6, -1000),
+ time.Unix(-1e6, 0),
+ time.Unix(-1e6, +1000),
+ time.Unix(-60, -1000),
+ time.Unix(-60, 0),
+ time.Unix(-60, +1000),
+ time.Unix(-1, -1000),
+ time.Unix(-1, 0),
+ time.Unix(-1, +1000),
+ time.Unix(0, -3000),
+ time.Unix(0, -2000),
+ time.Unix(0, -1000),
+ time.Unix(0, 0),
+ time.Unix(0, +1000),
+ time.Unix(0, +2000),
+ time.Unix(+60, -1000),
+ time.Unix(+60, 0),
+ time.Unix(+60, +1000),
+ time.Unix(+1e6, -1000),
+ time.Unix(+1e6, 0),
+ time.Unix(+1e6, +1000),
+ time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
+ time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
+ time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
+ time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
+ }
+ for _, tc := range testCases {
+ got := fromUnixMicro(toUnixMicro(tc))
+ if !got.Equal(tc) {
+ t.Errorf("got %q, want %q", got, tc)
+ }
+ }
+
+ // Test that a time.Time that isn't an integral number of microseconds
+ // is not perfectly reconstructed after a round trip.
+ t0 := time.Unix(0, 123)
+ t1 := fromUnixMicro(toUnixMicro(t0))
+ if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
+ t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 000000000..a7f3f2b28
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,87 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func init() {
+ internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+}
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+// RunInTransaction runs f in a transaction. It calls f with a transaction
+// context tc that f should use for all App Engine operations.
+//
+// If f returns nil, RunInTransaction attempts to commit the transaction,
+// returning nil if it succeeds. If the commit fails due to a conflicting
+// transaction, RunInTransaction retries f, each time with a new transaction
+// context. It gives up and returns ErrConcurrentTransaction after three
+// failed attempts. The number of attempts can be configured by specifying
+// TransactionOptions.Attempts.
+//
+// If f returns non-nil, then any datastore changes will not be applied and
+// RunInTransaction returns that same error. The function f is not retried.
+//
+// Note that when f returns, the transaction is not yet committed. Calling code
+// must be careful not to assume that any of f's changes have been committed
+// until RunInTransaction returns nil.
+//
+// Since f may be called multiple times, f should usually be idempotent.
+// datastore.Get is not idempotent when unmarshaling slice fields.
+//
+// Nested transactions are not supported; c may not be a transaction context.
+func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
+ xg := false
+ if opts != nil {
+ xg = opts.XG
+ }
+ attempts := 3
+ if opts != nil && opts.Attempts > 0 {
+ attempts = opts.Attempts
+ }
+ for i := 0; i < attempts; i++ {
+ if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction {
+ return err
+ }
+ }
+ return ErrConcurrentTransaction
+}
+
+// TransactionOptions are the options for running a transaction.
+type TransactionOptions struct {
+ // XG is whether the transaction can cross multiple entity groups. In
+ // comparison, a single group transaction is one where all datastore keys
+ // used have the same root key. Note that cross group transactions do not
+ // have the same behavior as single group transactions. In particular,
+ // global queries are much more likely to observe partially applied
+ // transactions across different entity groups.
+ // It is valid to set XG to true even if the transaction is within a
+ // single entity group.
+ XG bool
+ // Attempts controls the number of retries to perform when commits fail
+ // due to a conflicting transaction. If omitted, it defaults to 3.
+ Attempts int
+}
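+
+// Illustrative, client-side sketch of a typical caller; Counter and key are
+// placeholders for the example.
+//
+//	err := datastore.RunInTransaction(c, func(tc context.Context) error {
+//		var count Counter
+//		if err := datastore.Get(tc, key, &count); err != nil && err != datastore.ErrNoSuchEntity {
+//			return err
+//		}
+//		count.N++
+//		_, err := datastore.Put(tc, key, &count)
+//		return err
+//	}, &datastore.TransactionOptions{Attempts: 5})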
diff --git a/vendor/google.golang.org/appengine/delay/delay.go b/vendor/google.golang.org/appengine/delay/delay.go
new file mode 100644
index 000000000..52915a422
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay.go
@@ -0,0 +1,295 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package delay provides a way to execute code outside the scope of a
+user request by using the taskqueue API.
+
+To declare a function that may be executed later, call Func
+in a top-level assignment context, passing it an arbitrary string key
+and a function whose first argument is of type context.Context.
+The key is used to look up the function so it can be called later.
+ var laterFunc = delay.Func("key", myFunc)
+It is also possible to use a function literal.
+ var laterFunc = delay.Func("key", func(c context.Context, x string) {
+ // ...
+ })
+
+To call a function, invoke its Call method.
+ laterFunc.Call(c, "something")
+A function may be called any number of times. If the function has any
+return arguments, and the last one is of type error, the function may
+return a non-nil error to signal that the function should be retried.
+
+The arguments to functions may be of any type that is encodable by the gob
+package. If an argument is of interface type, it is the client's responsibility
+to register with the gob package whatever concrete type may be passed for that
+argument; see http://golang.org/pkg/gob/#Register for details.
+
+Any errors during initialization or execution of a function will be
+logged to the application logs. Error logs that occur during initialization will
+be associated with the request that invoked the Call method.
+
+The state of a function invocation that has not yet successfully
+executed is preserved by combining the file name in which it is declared
+with the string key that was passed to the Func function. Updating an app
+with pending function invocations is safe as long as the relevant
+functions have the (filename, key) combination preserved.
+
+The delay package uses the Task Queue API to create tasks that call the
+reserved application path "/_ah/queue/go/delay".
+This path must not be marked as "login: required" in app.yaml;
+it must be marked as "login: admin" or have no access restriction.
+*/
+package delay // import "google.golang.org/appengine/delay"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "runtime"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/taskqueue"
+)
+
+// Function represents a function that may have a delayed invocation.
+type Function struct {
+ fv reflect.Value // Kind() == reflect.Func
+ key string
+ err error // any error during initialization
+}
+
+const (
+ // The HTTP path for invocations.
+ path = "/_ah/queue/go/delay"
+ // Use the default queue.
+ queue = ""
+)
+
+type contextKey int
+
+var (
+ // registry of all delayed functions
+ funcs = make(map[string]*Function)
+
+ // precomputed types
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+ // errors
+ errFirstArg = errors.New("first argument must be context.Context")
+ errOutsideDelayFunc = errors.New("request headers are only available inside a delay.Func")
+
+ // context keys
+ headersContextKey contextKey = 0
+)
+
+// Func declares a new Function. The second argument must be a function with a
+// first argument of type context.Context.
+// This function must be called at program initialization time. That means it
+// must be called in a global variable declaration or from an init function.
+// This restriction is necessary because the instance that delays a function
+// call may not be the one that executes it. Only the code executed at program
+// initialization time is guaranteed to have been run by an instance before it
+// receives a request.
+func Func(key string, i interface{}) *Function {
+ f := &Function{fv: reflect.ValueOf(i)}
+
+ // Derive unique, somewhat stable key for this func.
+ _, file, _, _ := runtime.Caller(1)
+ f.key = file + ":" + key
+
+ t := f.fv.Type()
+ if t.Kind() != reflect.Func {
+ f.err = errors.New("not a function")
+ return f
+ }
+ if t.NumIn() == 0 || !isContext(t.In(0)) {
+ f.err = errFirstArg
+ return f
+ }
+
+ // Register the function's arguments with the gob package.
+ // This is required because they are marshaled inside a []interface{}.
+ // gob.Register only expects to be called during initialization;
+ // that's fine because this function expects the same.
+ for i := 0; i < t.NumIn(); i++ {
+ // Only concrete types may be registered. If the argument has
+ // interface type, the client is responsible for registering the
+ // concrete types it will hold.
+ if t.In(i).Kind() == reflect.Interface {
+ continue
+ }
+ gob.Register(reflect.Zero(t.In(i)).Interface())
+ }
+
+ if old := funcs[f.key]; old != nil {
+ old.err = fmt.Errorf("multiple functions registered for %s in %s", key, file)
+ }
+ funcs[f.key] = f
+ return f
+}
+
+type invocation struct {
+ Key string
+ Args []interface{}
+}
+
+// Call invokes a delayed function.
+// err := f.Call(c, ...)
+// is equivalent to
+// t, _ := f.Task(...)
+// _, err := taskqueue.Add(c, t, "")
+func (f *Function) Call(c context.Context, args ...interface{}) error {
+ t, err := f.Task(args...)
+ if err != nil {
+ return err
+ }
+ _, err = taskqueueAdder(c, t, queue)
+ return err
+}
+
+// Task creates a Task that will invoke the function.
+// Its parameters may be tweaked before adding it to a queue.
+// Users should not modify the Path or Payload fields of the returned Task.
+func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {
+ if f.err != nil {
+ return nil, fmt.Errorf("delay: func is invalid: %v", f.err)
+ }
+
+ nArgs := len(args) + 1 // +1 for the context.Context
+ ft := f.fv.Type()
+ minArgs := ft.NumIn()
+ if ft.IsVariadic() {
+ minArgs--
+ }
+ if nArgs < minArgs {
+ return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs)
+ }
+ if !ft.IsVariadic() && nArgs > minArgs {
+ return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs)
+ }
+
+ // Check arg types.
+ for i := 1; i < nArgs; i++ {
+ at := reflect.TypeOf(args[i-1])
+ var dt reflect.Type
+ if i < minArgs {
+ // not a variadic arg
+ dt = ft.In(i)
+ } else {
+ // a variadic arg
+ dt = ft.In(minArgs).Elem()
+ }
+ // nil arguments won't have a type, so they need special handling.
+ if at == nil {
+ // nil interface
+ switch dt.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ continue // may be nil
+ }
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt)
+ }
+ switch at.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ av := reflect.ValueOf(args[i-1])
+ if av.IsNil() {
+ // nil value in interface; not supported by gob, so we replace it
+ // with a nil interface value
+ args[i-1] = nil
+ }
+ }
+ if !at.AssignableTo(dt) {
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt)
+ }
+ }
+
+ inv := invocation{
+ Key: f.key,
+ Args: args,
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(inv); err != nil {
+ return nil, fmt.Errorf("delay: gob encoding failed: %v", err)
+ }
+
+ return &taskqueue.Task{
+ Path: path,
+ Payload: buf.Bytes(),
+ }, nil
+}
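+
+// Illustrative sketch (laterFunc is a placeholder declared with delay.Func at
+// init time): Task is useful when the task should be tweaked before being
+// added, for example to postpone execution.
+//
+//	t, err := laterFunc.Task("some argument")
+//	if err != nil {
+//		return err
+//	}
+//	t.Delay = 10 * time.Minute
+//	if _, err := taskqueue.Add(c, t, ""); err != nil {
+//		return err
+//	}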
+
+// RequestHeaders returns the special task-queue HTTP request headers for the
+// current task queue handler. It returns an error if called from outside a delay.Func.
+func RequestHeaders(c context.Context) (*taskqueue.RequestHeaders, error) {
+ if ret, ok := c.Value(headersContextKey).(*taskqueue.RequestHeaders); ok {
+ return ret, nil
+ }
+ return nil, errOutsideDelayFunc
+}
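+
+// Illustrative sketch of reading the headers inside a delayed function; the
+// "report" key and the log format are placeholders.
+//
+//	var reportFunc = delay.Func("report", func(c context.Context) {
+//		if h, err := delay.RequestHeaders(c); err == nil {
+//			log.Infof(c, "task %s, retry %d", h.TaskName, h.TaskRetryCount)
+//		}
+//	})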
+
+var taskqueueAdder = taskqueue.Add // for testing
+
+func init() {
+ http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
+ runFunc(appengine.NewContext(req), w, req)
+ })
+}
+
+func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+
+ c = context.WithValue(c, headersContextKey, taskqueue.ParseRequestHeaders(req.Header))
+
+ var inv invocation
+ if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {
+ log.Errorf(c, "delay: failed decoding task payload: %v", err)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ f := funcs[inv.Key]
+ if f == nil {
+ log.Errorf(c, "delay: no func with key %q found", inv.Key)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ ft := f.fv.Type()
+ in := []reflect.Value{reflect.ValueOf(c)}
+ for _, arg := range inv.Args {
+ var v reflect.Value
+ if arg != nil {
+ v = reflect.ValueOf(arg)
+ } else {
+ // Task was passed a nil argument, so we must construct
+ // the zero value for the argument here.
+ n := len(in) // we're constructing the nth argument
+ var at reflect.Type
+ if !ft.IsVariadic() || n < ft.NumIn()-1 {
+ at = ft.In(n)
+ } else {
+ at = ft.In(ft.NumIn() - 1).Elem()
+ }
+ v = reflect.Zero(at)
+ }
+ in = append(in, v)
+ }
+ out := f.fv.Call(in)
+
+ if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {
+ if errv := out[n-1]; !errv.IsNil() {
+ log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface())
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_go17.go b/vendor/google.golang.org/appengine/delay/delay_go17.go
new file mode 100644
index 000000000..9a59e8b0d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_go17.go
@@ -0,0 +1,23 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+//+build go1.7
+
+package delay
+
+import (
+ stdctx "context"
+ "reflect"
+
+ netctx "golang.org/x/net/context"
+)
+
+var (
+ stdContextType = reflect.TypeOf((*stdctx.Context)(nil)).Elem()
+ netContextType = reflect.TypeOf((*netctx.Context)(nil)).Elem()
+)
+
+func isContext(t reflect.Type) bool {
+ return t == stdContextType || t == netContextType
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_go17_test.go b/vendor/google.golang.org/appengine/delay/delay_go17_test.go
new file mode 100644
index 000000000..0e708d005
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_go17_test.go
@@ -0,0 +1,55 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+//+build go1.7
+
+package delay
+
+import (
+ "bytes"
+ stdctx "context"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ netctx "golang.org/x/net/context"
+ "google.golang.org/appengine/taskqueue"
+)
+
+var (
+ stdCtxRuns = 0
+ stdCtxFunc = Func("stdctx", func(c stdctx.Context) {
+ stdCtxRuns++
+ })
+)
+
+func TestStandardContext(t *testing.T) {
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ netctx.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ c := newFakeContext()
+ stdCtxRuns = 0 // reset state
+ if err := stdCtxFunc.Call(c.ctx); err != nil {
+ t.Fatal("Function.Call:", err)
+ }
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if stdCtxRuns != 1 {
+ t.Errorf("stdCtxRuns: got %d, want 1", stdCtxRuns)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_pre17.go b/vendor/google.golang.org/appengine/delay/delay_pre17.go
new file mode 100644
index 000000000..d30c75dfb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_pre17.go
@@ -0,0 +1,19 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+//+build !go1.7
+
+package delay
+
+import (
+ "reflect"
+
+ "golang.org/x/net/context"
+)
+
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+
+func isContext(t reflect.Type) bool {
+ return t == contextType
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_test.go b/vendor/google.golang.org/appengine/delay/delay_test.go
new file mode 100644
index 000000000..3df2bf7e3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_test.go
@@ -0,0 +1,428 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package delay
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/taskqueue"
+)
+
+type CustomType struct {
+ N int
+}
+
+type CustomInterface interface {
+ N() int
+}
+
+type CustomImpl int
+
+func (c CustomImpl) N() int { return int(c) }
+
+// CustomImpl needs to be registered with gob.
+func init() {
+ gob.Register(CustomImpl(0))
+}
+
+var (
+ invalidFunc = Func("invalid", func() {})
+
+ regFuncRuns = 0
+ regFuncMsg = ""
+ regFunc = Func("reg", func(c context.Context, arg string) {
+ regFuncRuns++
+ regFuncMsg = arg
+ })
+
+ custFuncTally = 0
+ custFunc = Func("cust", func(c context.Context, ct *CustomType, ci CustomInterface) {
+ a, b := 2, 3
+ if ct != nil {
+ a = ct.N
+ }
+ if ci != nil {
+ b = ci.N()
+ }
+ custFuncTally += a + b
+ })
+
+ anotherCustFunc = Func("cust2", func(c context.Context, n int, ct *CustomType, ci CustomInterface) {
+ })
+
+ varFuncMsg = ""
+ varFunc = Func("variadic", func(c context.Context, format string, args ...int) {
+ // convert []int to []interface{} for fmt.Sprintf.
+ as := make([]interface{}, len(args))
+ for i, a := range args {
+ as[i] = a
+ }
+ varFuncMsg = fmt.Sprintf(format, as...)
+ })
+
+ errFuncRuns = 0
+ errFuncErr = errors.New("error!")
+ errFunc = Func("err", func(c context.Context) error {
+ errFuncRuns++
+ if errFuncRuns == 1 {
+ return nil
+ }
+ return errFuncErr
+ })
+
+ dupeWhich = 0
+ dupe1Func = Func("dupe", func(c context.Context) {
+ if dupeWhich == 0 {
+ dupeWhich = 1
+ }
+ })
+ dupe2Func = Func("dupe", func(c context.Context) {
+ if dupeWhich == 0 {
+ dupeWhich = 2
+ }
+ })
+
+ reqFuncRuns = 0
+ reqFuncHeaders *taskqueue.RequestHeaders
+ reqFuncErr error
+ reqFunc = Func("req", func(c context.Context) {
+ reqFuncRuns++
+ reqFuncHeaders, reqFuncErr = RequestHeaders(c)
+ })
+)
+
+type fakeContext struct {
+ ctx context.Context
+ logging [][]interface{}
+}
+
+func newFakeContext() *fakeContext {
+ f := new(fakeContext)
+ f.ctx = internal.WithCallOverride(context.Background(), f.call)
+ f.ctx = internal.WithLogOverride(f.ctx, f.logf)
+ return f
+}
+
+func (f *fakeContext) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ panic("should never be called")
+}
+
+var logLevels = map[int64]string{1: "INFO", 3: "ERROR"}
+
+func (f *fakeContext) logf(level int64, format string, args ...interface{}) {
+ f.logging = append(f.logging, append([]interface{}{logLevels[level], format}, args...))
+}
+
+func TestInvalidFunction(t *testing.T) {
+ c := newFakeContext()
+
+ if got, want := invalidFunc.Call(c.ctx), fmt.Errorf("delay: func is invalid: %s", errFirstArg); got.Error() != want.Error() {
+ t.Errorf("Incorrect error: got %q, want %q", got, want)
+ }
+}
+
+func TestVariadicFunctionArguments(t *testing.T) {
+ // Check the argument type validation for variadic functions.
+
+ c := newFakeContext()
+
+ calls := 0
+ taskqueueAdder = func(c context.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) {
+ calls++
+ return t, nil
+ }
+
+ varFunc.Call(c.ctx, "hi")
+ varFunc.Call(c.ctx, "%d", 12)
+ varFunc.Call(c.ctx, "%d %d %d", 3, 1, 4)
+ if calls != 3 {
+ t.Errorf("Got %d calls to taskqueueAdder, want 3", calls)
+ }
+
+ if got, want := varFunc.Call(c.ctx, "%d %s", 12, "a string is bad"), errors.New("delay: argument 3 has wrong type: string is not assignable to int"); got.Error() != want.Error() {
+ t.Errorf("Incorrect error: got %q, want %q", got, want)
+ }
+}
+
+func TestBadArguments(t *testing.T) {
+ // Try running regFunc with different sets of inappropriate arguments.
+
+ c := newFakeContext()
+
+ tests := []struct {
+ args []interface{} // all except context
+ wantErr string
+ }{
+ {
+ args: nil,
+ wantErr: "delay: too few arguments to func: 1 < 2",
+ },
+ {
+ args: []interface{}{"lala", 53},
+ wantErr: "delay: too many arguments to func: 3 > 2",
+ },
+ {
+ args: []interface{}{53},
+ wantErr: "delay: argument 1 has wrong type: int is not assignable to string",
+ },
+ }
+ for i, tc := range tests {
+ got := regFunc.Call(c.ctx, tc.args...)
+ if got.Error() != tc.wantErr {
+ t.Errorf("Call %v: got %q, want %q", i, got, tc.wantErr)
+ }
+ }
+}
+
+func TestRunningFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ regFuncRuns, regFuncMsg = 0, "" // reset state
+ const msg = "Why, hello!"
+ regFunc.Call(c.ctx, msg)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if regFuncRuns != 1 {
+ t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns)
+ }
+ if regFuncMsg != msg {
+ t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg)
+ }
+}
+
+func TestCustomType(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ custFuncTally = 0 // reset state
+ custFunc.Call(c.ctx, &CustomType{N: 11}, CustomImpl(13))
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if custFuncTally != 24 {
+ t.Errorf("custFuncTally = %d, want 24", custFuncTally)
+ }
+
+ // Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value),
+ // and the other is a nil interface value.
+ custFuncTally = 0 // reset state
+ custFunc.Call(c.ctx, (*CustomType)(nil), nil)
+
+ // Simulate the Task Queue service.
+ req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw = httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if custFuncTally != 5 {
+ t.Errorf("custFuncTally = %d, want 5", custFuncTally)
+ }
+}
+
+func TestRunningVariadic(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ varFuncMsg = "" // reset state
+ varFunc.Call(c.ctx, "Amiga %d has %d KB RAM", 500, 512)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ const expected = "Amiga 500 has 512 KB RAM"
+ if varFuncMsg != expected {
+ t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected)
+ }
+}
+
+func TestErrorFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ errFunc.Call(c.ctx)
+
+ // Simulate the Task Queue service.
+ // The first call should succeed; the second call should fail.
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+ }
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+ if rw.Code != http.StatusInternalServerError {
+ t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError)
+ }
+
+ wantLogging := [][]interface{}{
+ {"ERROR", "delay: func failed (will retry): %v", errFuncErr},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+ }
+}
+
+func TestDuplicateFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ if err := dupe1Func.Call(c.ctx); err == nil {
+ t.Error("dupe1Func.Call did not return error")
+ }
+ if task != nil {
+ t.Error("dupe1Func.Call posted a task")
+ }
+ if err := dupe2Func.Call(c.ctx); err != nil {
+ t.Errorf("dupe2Func.Call error: %v", err)
+ }
+ if task == nil {
+ t.Fatalf("dupe2Func.Call did not post a task")
+ }
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if dupeWhich == 1 {
+ t.Error("dupe2Func.Call used old registered function")
+ } else if dupeWhich != 2 {
+ t.Errorf("dupeWhich = %d; want 2", dupeWhich)
+ }
+}
+
+func TestGetRequestHeadersFromContext(t *testing.T) {
+ c := newFakeContext()
+
+ // Outside a delay.Func should return an error.
+ headers, err := RequestHeaders(c.ctx)
+ if headers != nil {
+ t.Errorf("RequestHeaders outside Func, got %v, want nil", headers)
+ }
+ if err != errOutsideDelayFunc {
+ t.Errorf("RequestHeaders outside Func err, got %v, want %v", err, errOutsideDelayFunc)
+ }
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ reqFunc.Call(c.ctx)
+
+ reqFuncRuns, reqFuncHeaders = 0, nil // reset state
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ req.Header.Set("x-appengine-taskname", "foobar")
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if reqFuncRuns != 1 {
+ t.Errorf("reqFuncRuns: got %d, want 1", reqFuncRuns)
+ }
+ if reqFuncHeaders.TaskName != "foobar" {
+ t.Errorf("reqFuncHeaders.TaskName: got %v, want 'foobar'", reqFuncHeaders.TaskName)
+ }
+ if reqFuncErr != nil {
+ t.Errorf("reqFuncErr: got %v, want nil", reqFuncErr)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/app.yaml b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
new file mode 100644
index 000000000..334250332
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
@@ -0,0 +1,14 @@
+# Demo application for App Engine "flexible environment".
+runtime: go
+vm: true
+api_version: go1
+
+handlers:
+# Favicon. Without this, the browser hits this once per page view.
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+
+# Main app. All the real work is here.
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
new file mode 100644
index 000000000..1a71ea772
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
Binary files differ
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
new file mode 100644
index 000000000..04a0432bb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
@@ -0,0 +1,109 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This example only works on App Engine "flexible environment".
+// +build !appengine
+
+package main
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+var initTime time.Time
+
+type Greeting struct {
+ Author string
+ Content string
+ Date time.Time
+}
+
+func main() {
+ http.HandleFunc("/", handleMainPage)
+ http.HandleFunc("/sign", handleSign)
+ appengine.Main()
+}
+
+// guestbookKey returns the key used for all guestbook entries.
+func guestbookKey(ctx context.Context) *datastore.Key {
+ // The string "default_guestbook" here could be varied to have multiple guestbooks.
+ return datastore.NewKey(ctx, "Guestbook", "default_guestbook", 0, nil)
+}
+
+var tpl = template.Must(template.ParseGlob("templates/*.html"))
+
+func handleMainPage(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ http.Error(w, "GET requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ ctx := appengine.NewContext(r)
+ tic := time.Now()
+ q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(ctx)).Order("-Date").Limit(10)
+ var gg []*Greeting
+ if _, err := q.GetAll(ctx, &gg); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ log.Errorf(ctx, "GetAll: %v", err)
+ return
+ }
+ log.Infof(ctx, "Datastore lookup took %s", time.Since(tic).String())
+ log.Infof(ctx, "Rendering %d greetings", len(gg))
+
+ var email, logout, login string
+ if u := user.Current(ctx); u != nil {
+ logout, _ = user.LogoutURL(ctx, "/")
+ email = u.Email
+ } else {
+ login, _ = user.LoginURL(ctx, "/")
+ }
+ data := struct {
+ Greetings []*Greeting
+ Login, Logout, Email string
+ }{
+ Greetings: gg,
+ Login: login,
+ Logout: logout,
+ Email: email,
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil {
+ log.Errorf(ctx, "%v", err)
+ }
+}
+
+func handleSign(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ ctx := appengine.NewContext(r)
+ g := &Greeting{
+ Content: r.FormValue("content"),
+ Date: time.Now(),
+ }
+ if u := user.Current(ctx); u != nil {
+ g.Author = u.String()
+ }
+ key := datastore.NewIncompleteKey(ctx, "Greeting", guestbookKey(ctx))
+ if _, err := datastore.Put(ctx, key, g); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // Redirect with 303 which causes the subsequent request to use GET.
+ http.Redirect(w, r, "/", http.StatusSeeOther)
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/index.yaml b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
new file mode 100644
index 000000000..315ffeb0e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
@@ -0,0 +1,7 @@
+indexes:
+
+- kind: Greeting
+ ancestor: yes
+ properties:
+ - name: Date
+ direction: desc
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
new file mode 100644
index 000000000..322b7cf63
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Guestbook Demo</title>
+ </head>
+ <body>
+ <p>
+ {{with .Email}}You are currently logged in as {{.}}.{{end}}
+ {{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
+ {{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+ </p>
+
+ {{range .Greetings }}
+ <p>
+ {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
+ on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
+ wrote <blockquote>{{.Content}}</blockquote>
+ </p>
+ {{end}}
+
+ <form action="/sign" method="post">
+ <div><textarea name="content" rows="3" cols="60"></textarea></div>
+ <div><input type="submit" value="Sign Guestbook"></div>
+ </form>
+ </body>
+</html>
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/app.yaml b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
new file mode 100644
index 000000000..15091192f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
@@ -0,0 +1,10 @@
+runtime: go
+api_version: go1
+vm: true
+
+handlers:
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
new file mode 100644
index 000000000..f19c04d27
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
Binary files differ
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
new file mode 100644
index 000000000..fbe9f56ed
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
@@ -0,0 +1,50 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This example only works on App Engine "flexible environment".
+// +build !appengine
+
+package main
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+)
+
+var initTime = time.Now()
+
+func main() {
+ http.HandleFunc("/", handle)
+ appengine.Main()
+}
+
+func handle(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ ctx := appengine.NewContext(r)
+ log.Infof(ctx, "Serving the front page.")
+
+ tmpl.Execute(w, time.Since(initTime))
+}
+
+var tmpl = template.Must(template.New("front").Parse(`
+<html><body>
+
+<p>
+Hello, World! 세상아 안녕!
+</p>
+
+<p>
+This instance has been running for <em>{{.}}</em>.
+</p>
+
+</body></html>
+`))
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 000000000..16d0772e2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+ callErr, ok := err.(*internal.CallError)
+ return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
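MultiError is what batch operations such as datastore.GetMulti return when only some elements fail. A minimal sketch of unpacking it follows; the Item type and the error-handling policy are illustrative, not part of this package.

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

type Item struct{ Name string }

func getItems(ctx context.Context, keys []*datastore.Key) ([]*Item, error) {
	items := make([]*Item, len(keys))
	for i := range items {
		items[i] = new(Item)
	}
	err := datastore.GetMulti(ctx, keys, items)
	me, ok := err.(appengine.MultiError)
	if err != nil && !ok {
		return nil, err // the whole batch failed
	}
	// Entries correspond one-to-one with keys; nil means that key succeeded.
	for i, e := range me {
		switch e {
		case nil:
			// keys[i] was loaded into items[i]
		case datastore.ErrNoSuchEntity:
			items[i] = nil // that entity simply does not exist
		default:
			return nil, e // a real per-key failure
		}
	}
	return items, nil
}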
diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go
new file mode 100644
index 000000000..c3cd58baf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/file/file.go
@@ -0,0 +1,28 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package file provides helper functions for using Google Cloud Storage.
+package file
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ aipb "google.golang.org/appengine/internal/app_identity"
+)
+
+// DefaultBucketName returns the name of this application's
+// default Google Cloud Storage bucket.
+func DefaultBucketName(c context.Context) (string, error) {
+ req := &aipb.GetDefaultGcsBucketNameRequest{}
+ res := &aipb.GetDefaultGcsBucketNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res)
+ if err != nil {
+ return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res)
+ }
+ return res.GetDefaultGcsBucketName(), nil
+}
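A minimal sketch of calling DefaultBucketName from a request handler follows; it is illustrative only, and the handler name and log messages are made up.

package example

import (
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/file"
	"google.golang.org/appengine/log"
)

func bucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	bucket, err := file.DefaultBucketName(ctx)
	if err != nil {
		log.Errorf(ctx, "DefaultBucketName: %v", err)
		http.Error(w, "internal error", http.StatusInternalServerError)
		return
	}
	// The bucket name can now be handed to any GCS client library.
	log.Infof(ctx, "default GCS bucket: %s", bucket)
}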
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 000000000..b8dcf8f36
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/app_identity"
+ modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+ return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+ return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+ req := &modpb.GetHostnameRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ if instance != "" {
+ req.Instance = &instance
+ }
+ res := &modpb.GetHostnameResponse{}
+ if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+ return "", err
+ }
+ return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of the service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+ req := &pb.GetAccessTokenRequest{Scope: scopes}
+ res := &pb.GetAccessTokenResponse{}
+
+ err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+ if err != nil {
+ return "", time.Time{}, err
+ }
+ return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+ KeyName string
+ Data []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+ req := &pb.GetPublicCertificateForAppRequest{}
+ res := &pb.GetPublicCertificateForAppResponse{}
+ if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+ return nil, err
+ }
+ var cs []Certificate
+ for _, pc := range res.PublicCertificateList {
+ cs = append(cs, Certificate{
+ KeyName: pc.GetKeyName(),
+ Data: []byte(pc.GetX509CertificatePem()),
+ })
+ }
+ return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+ req := &pb.GetServiceAccountNameRequest{}
+ res := &pb.GetServiceAccountNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+ if err != nil {
+ return "", err
+ }
+ return res.GetServiceAccountName(), nil
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+ req := &pb.SignForAppRequest{BytesToSign: bytes}
+ res := &pb.SignForAppResponse{}
+
+ if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+ return "", nil, err
+ }
+ return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+ internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
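As a usage illustration for the identity helpers above, here is a hedged sketch of minting a token with AccessToken; the scope string is just an example and the function is not part of the vendored code.

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func logServiceToken(ctx context.Context) {
	// AccessToken mints an OAuth2 token for the app's service account;
	// attach it to outgoing API requests as "Authorization: Bearer <token>".
	token, expiry, err := appengine.AccessToken(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Errorf(ctx, "AccessToken: %v", err)
		return
	}
	log.Infof(ctx, "token of length %d acquired, valid until %v", len(token), expiry)
}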
diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go
new file mode 100644
index 000000000..027a41b70
--- /dev/null
+++ b/vendor/google.golang.org/appengine/image/image.go
@@ -0,0 +1,67 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package image provides image services.
+package image // import "google.golang.org/appengine/image"
+
+import (
+ "fmt"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/image"
+)
+
+type ServingURLOptions struct {
+ Secure bool // whether the URL should use HTTPS
+
+ // Size must be between zero and 1600.
+ // If Size is non-zero, a resized version of the image is served,
+ // and Size is the served image's longest dimension. The aspect ratio is preserved.
+ // If Crop is true the image is cropped from the center instead of being resized.
+ Size int
+ Crop bool
+}
+
+// ServingURL returns a URL that will serve an image from Blobstore.
+func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) {
+ req := &pb.ImagesGetUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ if opts != nil && opts.Secure {
+ req.CreateSecureUrl = &opts.Secure
+ }
+ res := &pb.ImagesGetUrlBaseResponse{}
+ if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil {
+ return nil, err
+ }
+
+ // The URL may have suffixes added to dynamically resize or crop:
+ // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio.
+ // - adding "=s32-c" is the same as "=s32" except it will be cropped.
+ u := *res.Url
+ if opts != nil && opts.Size > 0 {
+ u += fmt.Sprintf("=s%d", opts.Size)
+ if opts.Crop {
+ u += "-c"
+ }
+ }
+ return url.Parse(u)
+}
+
+// DeleteServingURL deletes the serving URL for an image.
+func DeleteServingURL(c context.Context, key appengine.BlobKey) error {
+ req := &pb.ImagesDeleteUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ res := &pb.ImagesDeleteUrlBaseResponse{}
+ return internal.Call(c, "images", "DeleteUrlBase", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
+}
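A minimal sketch of building a resized, cropped serving URL with the options above; the 200-pixel size and the assumption that the BlobKey comes from a blobstore upload are illustrative.

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/image"
)

func thumbnailURL(ctx context.Context, key appengine.BlobKey) (string, error) {
	u, err := image.ServingURL(ctx, key, &image.ServingURLOptions{
		Secure: true, // serve over HTTPS
		Size:   200,  // longest dimension of the served image
		Crop:   true, // crop from the center rather than just resizing
	})
	if err != nil {
		return "", err
	}
	return u.String(), nil
}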
diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
new file mode 100644
index 000000000..eb5b2c65b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
@@ -0,0 +1,81 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package aetesting provides utilities for testing App Engine packages.
+// This is not for testing user applications.
+package aetesting
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// FakeSingleContext returns a context whose Call invocations will be serviced
+// by f, which should be a function that has two arguments of the input and output
+// protocol buffer type, and one error return.
+func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context {
+ fv := reflect.ValueOf(f)
+ if fv.Kind() != reflect.Func {
+ t.Fatal("not a function")
+ }
+ ft := fv.Type()
+ if ft.NumIn() != 2 || ft.NumOut() != 1 {
+ t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut())
+ }
+ for i := 0; i < 2; i++ {
+ at := ft.In(i)
+ if !at.Implements(protoMessageType) {
+ t.Fatalf("arg %d does not implement proto.Message", i)
+ }
+ }
+ if ft.Out(0) != errorType {
+ t.Fatalf("f's return is %v, want error", ft.Out(0))
+ }
+ s := &single{
+ t: t,
+ service: service,
+ method: method,
+ f: fv,
+ }
+ return internal.WithCallOverride(internal.ContextForTesting(&http.Request{}), s.call)
+}
+
+var (
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+type single struct {
+ t *testing.T
+ service, method string
+ f reflect.Value
+}
+
+func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ if service == "__go__" {
+ if method == "GetNamespace" {
+ return nil // always yield an empty namespace
+ }
+ return fmt.Errorf("Unknown API call /%s.%s", service, method)
+ }
+ if service != s.service || method != s.method {
+ s.t.Fatalf("Unexpected call to /%s.%s", service, method)
+ }
+ ins := []reflect.Value{
+ reflect.ValueOf(in),
+ reflect.ValueOf(out),
+ }
+ outs := s.f.Call(ins)
+ if outs[0].IsNil() {
+ return nil
+ }
+ return outs[0].Interface().(error)
+}
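FakeSingleContext lets a unit test stub out a single API call. A hedged sketch follows, faking the app_identity call made by file.DefaultBucketName (earlier in this diff); the test name and bucket value are invented, and the internal import paths only compile from within the google.golang.org/appengine tree.

package file_test

import (
	"testing"

	"github.com/golang/protobuf/proto"

	"google.golang.org/appengine/file"
	"google.golang.org/appengine/internal/aetesting"
	aipb "google.golang.org/appengine/internal/app_identity"
)

func TestDefaultBucketName(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "app_identity_service", "GetDefaultGcsBucketName",
		func(req *aipb.GetDefaultGcsBucketNameRequest, res *aipb.GetDefaultGcsBucketNameResponse) error {
			res.DefaultGcsBucketName = proto.String("my-default-bucket")
			return nil
		})

	got, err := file.DefaultBucketName(c)
	if err != nil {
		t.Fatalf("DefaultBucketName: %v", err)
	}
	if want := "my-default-bucket"; got != want {
		t.Errorf("DefaultBucketName = %q, want %q", got, want)
	}
}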
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 000000000..16f87c5d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,660 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build go1.7
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+ backgroundContextOnce sync.Once
+ backgroundContext netcontext.Context
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ r = r.WithContext(withContext(r.Context(), c))
+ c.req = r
+
+ stopFlushing := make(chan int)
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// jointContext joins two contexts in a superficial way.
+// It takes values and timeouts from a base context, and only values from another context.
+type jointContext struct {
+ base netcontext.Context
+ valuesOnly netcontext.Context
+}
+
+func (c jointContext) Deadline() (time.Time, bool) {
+ return c.base.Deadline()
+}
+
+func (c jointContext) Done() <-chan struct{} {
+ return c.base.Done()
+}
+
+func (c jointContext) Err() error {
+ return c.base.Err()
+}
+
+func (c jointContext) Value(key interface{}) interface{} {
+ if val := c.base.Value(key); val != nil {
+ return val
+ }
+ return c.valuesOnly.Value(key)
+}
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return req.Context()
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ return jointContext{
+ base: parent,
+ valuesOnly: req.Context(),
+ }
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+ backgroundContextOnce.Do(func() {
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+ backgroundContext = toContext(c)
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go c.logFlusher(make(chan int))
+ })
+
+ return backgroundContext
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctx := withContext(decorate(req.Context()), c)
+ req = req.WithContext(ctx)
+ c.req = req
+ return req, func() {}
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ // Use a test ticket in the test environment.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+ // Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 000000000..f0f40b2e3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil, errNotAppEngineContext
+ }
+ return c, nil
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = NamespacedContext(ctx, ns)
+ }
+
+ return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ if req, ok := c.Request().(*http.Request); ok {
+ return req.Header
+ }
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+type testingContext struct {
+ appengine.Context
+
+ req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+ if service == "__go__" && method == "GetNamespace" {
+ return nil
+ }
+ return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ err := c.Call(service, method, in, out, opts)
+ switch v := err.(type) {
+ case *appengine_internal.APIError:
+ return &APIError{
+ Service: v.Service,
+ Detail: v.Detail,
+ Code: v.Code,
+ }
+ case *appengine_internal.CallError:
+ return &CallError{
+ Detail: v.Detail,
+ Code: v.Code,
+ Timeout: v.Timeout,
+ }
+ }
+ return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 000000000..e0c0b214b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "errors"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var errNotAppEngineContext = errors.New("not an App Engine context")
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+ // We avoid appending to any existing call override
+ // so we don't risk overwriting a popped stack below.
+ var cofs []CallOverrideFunc
+ if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+ cofs = append(cofs, uf...)
+ }
+ cofs = append(cofs, f)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+ cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+ if len(cofs) == 0 {
+ return nil, nil, false
+ }
+ // We found a list of overrides; grab the last, and reconstitute a
+ // context that will hide it.
+ f := cofs[len(cofs)-1]
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ return f, ctx, true
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ logf(c, level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+ return withNamespace(ctx, namespace)
+}
+
+// SetTestEnv sets the env variables for testing background ticket in Flex.
+func SetTestEnv() func() {
+ var environ = []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_MINOR_VERSION", "067924799508853122"},
+ {"GAE_MODULE_INSTANCE", "0"},
+ {"GAE_MODULE_NAME", "default"},
+ {"GAE_MODULE_VERSION", "20150612t184001"},
+ }
+
+ for _, v := range environ {
+ old := os.Getenv(v.key)
+ os.Setenv(v.key, v.value)
+ v.value = old
+ }
+ return func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }
+}
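WithCallOverride is the hook that aetesting's FakeSingleContext builds on: callOverrideFromContext pops the innermost override and hands it a context that no longer contains it, so the override can service the call itself or forward it. A minimal sketch of a logging interceptor follows; it is illustrative only and, because internal is an internal package, it only compiles from inside the appengine tree.

package internaldemo

import (
	stdlog "log"

	"github.com/golang/protobuf/proto"
	netcontext "golang.org/x/net/context"

	"google.golang.org/appengine/internal"
)

func withCallLogging(ctx netcontext.Context) netcontext.Context {
	return internal.WithCallOverride(ctx, func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
		stdlog.Printf("API call: /%s.%s", service, method)
		// The override receives a context with itself already removed, so
		// this forwards to the next override, or to the real API bridge.
		return internal.Call(ctx, service, method, in, out)
	})
}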
diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go
new file mode 100644
index 000000000..028b4f056
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_pre17.go
@@ -0,0 +1,682 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build !go1.7
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ stopFlushing := make(chan int)
+
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+ // dec is used by tests to decorate the netcontext.Context returned
+ // for a given request. This allows tests to add overrides (such as
+ // WithAppIDOverride) to the context. The map is nil outside tests.
+ dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ d := ctxs.dec[req]
+ ctxs.Unlock()
+
+ if d != nil {
+ parent = d(parent)
+ }
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return withContext(parent, c)
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return toContext(ctxs.bg)
+ }
+
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go ctxs.bg.logFlusher(make(chan int))
+
+ return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctxs.Lock()
+ defer ctxs.Unlock()
+ if _, ok := ctxs.m[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if _, ok := ctxs.dec[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if ctxs.dec == nil {
+ ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+ }
+ ctxs.m[req] = c
+ ctxs.dec[req] = decorate
+
+ return req, func() {
+ ctxs.Lock()
+ delete(ctxs.m, req)
+ delete(ctxs.dec, req)
+ ctxs.Unlock()
+ }
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
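+// Call makes an API call over the service bridge: it marshals in, sends it
+// with the request's security ticket and the context's deadline, and
+// unmarshals the reply into out.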
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+	// Use a test ticket in the test environment.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+	// Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+			force := time.Since(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
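+// ContextForTesting returns a context wrapping req for use in tests.
+// No API endpoint is configured on the returned context.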
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
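Taken together, the exported surface above is used as follows: an in-flight *http.Request is wrapped into a context with WithContext, and RPCs then travel through Call as protocol buffer request/response pairs. The sketch below is illustrative only; it is written as if it sat in package internal next to the tests, and it borrows the fake "actordb"/"LookupActor" service implemented by the test API server in api_test.go.

package internal

import (
	"fmt"
	"net/http"

	"github.com/golang/protobuf/proto"
	netcontext "golang.org/x/net/context"

	basepb "google.golang.org/appengine/internal/base"
)

// lookupActor is a hypothetical handler; "actordb"/"LookupActor" is the fake
// service implemented by the test API server in api_test.go.
func lookupActor(w http.ResponseWriter, r *http.Request) {
	// Wrap the in-flight request; this picks up its security ticket and
	// any registered decorator.
	ctx := WithContext(netcontext.Background(), r)

	req := &basepb.StringProto{Value: proto.String("Doctor Who")}
	res := &basepb.StringProto{}
	if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, res.GetValue())
}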
diff --git a/vendor/google.golang.org/appengine/internal/api_race_test.go b/vendor/google.golang.org/appengine/internal/api_race_test.go
new file mode 100644
index 000000000..6cfe90649
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_race_test.go
@@ -0,0 +1,9 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package internal
+
+func init() { raceDetector = true }
diff --git a/vendor/google.golang.org/appengine/internal/api_test.go b/vendor/google.golang.org/appengine/internal/api_test.go
new file mode 100644
index 000000000..76624a28e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_test.go
@@ -0,0 +1,466 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const testTicketHeader = "X-Magic-Ticket-Header"
+
+func init() {
+ ticketHeader = testTicketHeader
+}
+
+type fakeAPIHandler struct {
+ hang chan int // used for RunSlowly RPC
+
+ LogFlushes int32 // atomic
+}
+
+func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ writeResponse := func(res *remotepb.Response) {
+ hresBody, err := proto.Marshal(res)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500)
+ return
+ }
+ w.Write(hresBody)
+ }
+
+ if r.URL.Path != "/rpc_http" {
+ http.NotFound(w, r)
+ return
+ }
+ hreqBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Bad body: %v", err), 500)
+ return
+ }
+ apiReq := &remotepb.Request{}
+ if err := proto.Unmarshal(hreqBody, apiReq); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500)
+ return
+ }
+ if *apiReq.RequestId != "s3cr3t" && *apiReq.RequestId != DefaultTicket() {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)),
+ Detail: proto.String("bad security ticket"),
+ },
+ })
+ return
+ }
+ if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)),
+ Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)),
+ },
+ })
+ return
+ }
+
+ service, method := *apiReq.ServiceName, *apiReq.Method
+ var resOut proto.Message
+ if service == "actordb" && method == "LookupActor" {
+ req := &basepb.StringProto{}
+ res := &basepb.StringProto{}
+ if err := proto.Unmarshal(apiReq.Request, req); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500)
+ return
+ }
+ if *req.Value == "Doctor Who" {
+ res.Value = proto.String("David Tennant")
+ }
+ resOut = res
+ }
+ if service == "errors" {
+ switch method {
+ case "Non200":
+ http.Error(w, "I'm a little teapot.", 418)
+ return
+ case "ShortResponse":
+ w.Header().Set("Content-Length", "100")
+ w.Write([]byte("way too short"))
+ return
+ case "OverQuota":
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)),
+ Detail: proto.String("you are hogging the resources!"),
+ },
+ })
+ return
+ case "RunSlowly":
+ // TestAPICallRPCFailure creates f.hang, but does not strobe it
+ // until Call returns with remotepb.RpcError_CANCELLED.
+ // This is here to force a happens-before relationship between
+ // the httptest server handler and shutdown.
+ <-f.hang
+ resOut = &basepb.VoidProto{}
+ }
+ }
+ if service == "logservice" && method == "Flush" {
+ // Pretend log flushing is slow.
+ time.Sleep(50 * time.Millisecond)
+ atomic.AddInt32(&f.LogFlushes, 1)
+ resOut = &basepb.VoidProto{}
+ }
+
+ encOut, err := proto.Marshal(resOut)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500)
+ return
+ }
+ writeResponse(&remotepb.Response{
+ Response: encOut,
+ })
+}
+
+func setup() (f *fakeAPIHandler, c *context, cleanup func()) {
+ f = &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ u, err := url.Parse(srv.URL + apiPath)
+ if err != nil {
+ panic(fmt.Sprintf("url.Parse(%q): %v", srv.URL+apiPath, err))
+ }
+ return f, &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ apiURL: u,
+ }, srv.Close
+}
+
+func TestAPICall(t *testing.T) {
+ _, c, cleanup := setup()
+ defer cleanup()
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ err := Call(toContext(c), "actordb", "LookupActor", req, res)
+ if err != nil {
+ t.Fatalf("API call failed: %v", err)
+ }
+ if got, want := *res.Value, "David Tennant"; got != want {
+ t.Errorf("Response is %q, want %q", got, want)
+ }
+}
+
+func TestAPICallTicketUnavailable(t *testing.T) {
+ resetEnv := SetTestEnv()
+ defer resetEnv()
+ _, c, cleanup := setup()
+ defer cleanup()
+
+ c.req.Header.Set(ticketHeader, "")
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ err := Call(toContext(c), "actordb", "LookupActor", req, res)
+ if err != nil {
+ t.Fatalf("API call failed: %v", err)
+ }
+ if got, want := *res.Value, "David Tennant"; got != want {
+ t.Errorf("Response is %q, want %q", got, want)
+ }
+}
+
+func TestAPICallRPCFailure(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ testCases := []struct {
+ method string
+ code remotepb.RpcError_ErrorCode
+ }{
+ {"Non200", remotepb.RpcError_UNKNOWN},
+ {"ShortResponse", remotepb.RpcError_UNKNOWN},
+ {"OverQuota", remotepb.RpcError_OVER_QUOTA},
+ {"RunSlowly", remotepb.RpcError_CANCELLED},
+ }
+ f.hang = make(chan int) // only for RunSlowly
+ for _, tc := range testCases {
+ ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond)
+ err := Call(ctx, "errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{})
+ ce, ok := err.(*CallError)
+ if !ok {
+ t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err)
+ continue
+ }
+ if ce.Code != int32(tc.code) {
+ t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code)
+ }
+ if tc.method == "RunSlowly" {
+ f.hang <- 1 // release the HTTP handler
+ }
+ }
+}
+
+func TestAPICallDialFailure(t *testing.T) {
+ // See what happens if the API host is unresponsive.
+ // This should time out quickly, not hang forever.
+ _, c, cleanup := setup()
+ defer cleanup()
+ // Reset the URL to the production address so that dialing fails.
+ c.apiURL = apiURL()
+
+ start := time.Now()
+ err := Call(toContext(c), "foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{})
+ const max = 1 * time.Second
+ if taken := time.Since(start); taken > max {
+ t.Errorf("Dial hang took too long: %v > %v", taken, max)
+ }
+ if err == nil {
+ t.Error("Call did not fail")
+ }
+}
+
+func TestDelayedLogFlushing(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) {
+ logC := WithContext(netcontext.Background(), r)
+ fromContext(logC).apiURL = c.apiURL // Otherwise it will try to use the default URL.
+ Logf(logC, 1, "It's a lovely day.")
+ w.WriteHeader(200)
+ w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush
+ })
+
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Path: "/quick_log",
+ },
+ Header: c.req.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ w := httptest.NewRecorder()
+
+ // Check that log flushing does not hold up the HTTP response.
+ start := time.Now()
+ handleHTTP(w, r)
+ if d := time.Since(start); d > 10*time.Millisecond {
+ t.Errorf("handleHTTP took %v, want under 10ms", d)
+ }
+ const hdr = "X-AppEngine-Log-Flush-Count"
+ if h := w.HeaderMap.Get(hdr); h != "1" {
+ t.Errorf("%s header = %q, want %q", hdr, h, "1")
+ }
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 0 {
+ t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f)
+ }
+
+ // Check that the log flush eventually comes in.
+ time.Sleep(100 * time.Millisecond)
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 1 {
+ t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f)
+ }
+}
+
+func TestRemoteAddr(t *testing.T) {
+ var addr string
+ http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) {
+ addr = r.RemoteAddr
+ })
+
+ testCases := []struct {
+ headers http.Header
+ addr string
+ }{
+ {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"},
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}},
+ "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80",
+ },
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}},
+ "[::1]:http",
+ },
+ {http.Header{}, "127.0.0.1:80"},
+ }
+
+ for _, tc := range testCases {
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/remote_addr"},
+ Header: tc.headers,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ handleHTTP(httptest.NewRecorder(), r)
+ if addr != tc.addr {
+ t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr)
+ }
+ }
+}
+
+func TestPanickingHandler(t *testing.T) {
+ http.HandleFunc("/panic", func(http.ResponseWriter, *http.Request) {
+ panic("whoops!")
+ })
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/panic"},
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ rec := httptest.NewRecorder()
+ handleHTTP(rec, r)
+ if rec.Code != 500 {
+ t.Errorf("Panicking handler returned HTTP %d, want HTTP %d", rec.Code, 500)
+ }
+}
+
+var raceDetector = false
+
+func TestAPICallAllocations(t *testing.T) {
+ if raceDetector {
+ t.Skip("not running under race detector")
+ }
+
+ // Run the test API server in a subprocess so we aren't counting its allocations.
+ u, cleanup := launchHelperProcess(t)
+ defer cleanup()
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ apiURL: u,
+ }
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ var apiErr error
+ avg := testing.AllocsPerRun(100, func() {
+ ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond)
+ if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil && apiErr == nil {
+ apiErr = err // get the first error only
+ }
+ })
+ if apiErr != nil {
+ t.Errorf("API call failed: %v", apiErr)
+ }
+
+ // Lots of room for improvement...
+ // TODO(djd): Reduce maximum to 85 once the App Engine SDK is based on 1.6.
+ const min, max float64 = 70, 100
+ if avg < min || max < avg {
+ t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max)
+ }
+}
+
+func launchHelperProcess(t *testing.T) (apiURL *url.URL, cleanup func()) {
+ cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe: %v", err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe: %v", err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Starting helper process: %v", err)
+ }
+
+ scan := bufio.NewScanner(stdout)
+ var u *url.URL
+ for scan.Scan() {
+ line := scan.Text()
+ if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line {
+ var err error
+ u, err = url.Parse(hp)
+ if err != nil {
+ t.Fatalf("Failed to parse %q: %v", hp, err)
+ }
+ break
+ }
+ }
+ if err := scan.Err(); err != nil {
+ t.Fatalf("Scanning helper process stdout: %v", err)
+ }
+ if u == nil {
+ t.Fatal("Helper process never reported")
+ }
+
+ return u, func() {
+ stdin.Close()
+ if err := cmd.Wait(); err != nil {
+ t.Errorf("Helper process did not exit cleanly: %v", err)
+ }
+ }
+}
+
+const helperProcessMagic = "A lovely helper process is listening at "
+
+// This isn't a real test. It's used as a helper process.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ f := &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ defer srv.Close()
+ fmt.Println(helperProcessMagic + srv.URL + apiPath)
+
+ // Wait for stdin to be closed.
+ io.Copy(ioutil.Discard, os.Stdin)
+}
+
+func TestBackgroundContext(t *testing.T) {
+ resetEnv := SetTestEnv()
+ defer resetEnv()
+
+ ctx, key := fromContext(BackgroundContext()), "X-Magic-Ticket-Header"
+ if g, w := ctx.req.Header.Get(key), "my-app-id/default.20150612t184001.0"; g != w {
+ t.Errorf("%v = %q, want %q", key, g, w)
+ }
+
+ // Check that using the background context doesn't panic.
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ Call(BackgroundContext(), "actordb", "LookupActor", req, res) // expected to fail
+}
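RegisterTestRequest, defined in api.go above, is the hook that lets a test harness route API calls from a hand-built request to a separate API endpoint. The sketch below is a rough outline of how the aetest package might use it; the URL and decorator are placeholders, and apiURL is assumed to point at a running dev_appserver API server.

package internal

import (
	"net/http"
	"net/url"

	netcontext "golang.org/x/net/context"
)

// exampleTestContext is an illustrative helper, not part of the vendored API.
// apiURL is assumed to point at a running dev_appserver API endpoint.
func exampleTestContext(apiURL *url.URL) (netcontext.Context, func()) {
	req, _ := http.NewRequest("GET", "http://example.test/", nil)
	req, release := RegisterTestRequest(req, apiURL, func(ctx netcontext.Context) netcontext.Context {
		return ctx // a real decorator could attach per-test options here
	})
	ctx := WithContext(netcontext.Background(), req)
	return ctx, release // callers defer release() to drop the registration
}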
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 000000000..11df8c07b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_id_test.go b/vendor/google.golang.org/appengine/internal/app_id_test.go
new file mode 100644
index 000000000..e69195cd4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id_test.go
@@ -0,0 +1,34 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "testing"
+)
+
+func TestAppIDParsing(t *testing.T) {
+ testCases := []struct {
+ in string
+ partition, domain, displayID string
+ }{
+ {"simple-app-id", "", "", "simple-app-id"},
+ {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"},
+ {"part~partition-app-id", "part", "", "partition-app-id"},
+ {"part~domain.com:display", "part", "domain.com", "display"},
+ }
+
+ for _, tc := range testCases {
+ part, dom, dis := parseFullAppID(tc.in)
+ if part != tc.partition {
+ t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition)
+ }
+ if dom != tc.domain {
+ t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain)
+ }
+ if dis != tc.displayID {
+ t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 000000000..87d9701b8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,296 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+ AppIdentityServiceError
+ SignForAppRequest
+ SignForAppResponse
+ GetPublicCertificateForAppRequest
+ PublicCertificate
+ GetPublicCertificateForAppResponse
+ GetServiceAccountNameRequest
+ GetServiceAccountNameResponse
+ GetAccessTokenRequest
+ GetAccessTokenResponse
+ GetDefaultGcsBucketNameRequest
+ GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+ AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
+ AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
+ AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
+ AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+ AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
+ AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
+ AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
+ AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+ 0: "SUCCESS",
+ 9: "UNKNOWN_SCOPE",
+ 1000: "BLOB_TOO_LARGE",
+ 1001: "DEADLINE_EXCEEDED",
+ 1002: "NOT_A_VALID_APP",
+ 1003: "UNKNOWN_ERROR",
+ 1005: "NOT_ALLOWED",
+ 1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+ "SUCCESS": 0,
+ "UNKNOWN_SCOPE": 9,
+ "BLOB_TOO_LARGE": 1000,
+ "DEADLINE_EXCEEDED": 1001,
+ "NOT_A_VALID_APP": 1002,
+ "UNKNOWN_ERROR": 1003,
+ "NOT_ALLOWED": 1005,
+ "NOT_IMPLEMENTED": 1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+ p := new(AppIdentityServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+ return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = AppIdentityServiceError_ErrorCode(value)
+ return nil
+}
+
+type AppIdentityServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+
+type SignForAppRequest struct {
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+ if m != nil {
+ return m.BytesToSign
+ }
+ return nil
+}
+
+type SignForAppResponse struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+ if m != nil {
+ return m.SignatureBytes
+ }
+ return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
+
+type PublicCertificate struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+
+func (m *PublicCertificate) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+ if m != nil && m.X509CertificatePem != nil {
+ return *m.X509CertificatePem
+ }
+ return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+ PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+ MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+ if m != nil {
+ return m.PublicCertificateList
+ }
+ return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+ if m != nil && m.MaxClientCacheTimeInSecond != nil {
+ return *m.MaxClientCacheTimeInSecond
+ }
+ return 0
+}
+
+type GetServiceAccountNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+
+type GetServiceAccountNameResponse struct {
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenRequest struct {
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+ if m != nil && m.ServiceAccountId != nil {
+ return *m.ServiceAccountId
+ }
+ return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenResponse struct {
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+ if m != nil && m.AccessToken != nil {
+ return *m.AccessToken
+ }
+ return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+
+type GetDefaultGcsBucketNameResponse struct {
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+ if m != nil && m.DefaultGcsBucketName != nil {
+ return *m.DefaultGcsBucketName
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 000000000..19610ca5b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+ enum ErrorCode {
+ SUCCESS = 0;
+ UNKNOWN_SCOPE = 9;
+ BLOB_TOO_LARGE = 1000;
+ DEADLINE_EXCEEDED = 1001;
+ NOT_A_VALID_APP = 1002;
+ UNKNOWN_ERROR = 1003;
+ NOT_ALLOWED = 1005;
+ NOT_IMPLEMENTED = 1006;
+ }
+}
+
+message SignForAppRequest {
+ optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+ optional string key_name = 1;
+ optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+ optional string key_name = 1;
+ optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+ repeated PublicCertificate public_certificate_list = 1;
+ optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+ optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+ repeated string scope = 1;
+ optional int64 service_account_id = 2;
+ optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+ optional string access_token = 1;
+ optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+ optional string default_gcs_bucket_name = 1;
+}
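The generated messages above are the payloads that the public appengine package sends over the service bridge for identity lookups. A minimal sketch of such a call follows; the "app_identity_service" service name mirrors the upstream identity implementation but is an assumption here, as is the surrounding helper function.

package appengine

import (
	netcontext "golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/app_identity"
)

// serviceAccountName is an illustrative helper; the "app_identity_service"
// name is assumed from the public identity implementation.
func serviceAccountName(ctx netcontext.Context) (string, error) {
	req := &pb.GetServiceAccountNameRequest{}
	res := &pb.GetServiceAccountNameResponse{}
	if err := internal.Call(ctx, "app_identity_service", "GetServiceAccountName", req, res); err != nil {
		return "", err
	}
	return res.GetServiceAccountName(), nil
}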
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 000000000..36a195650
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,133 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+ StringProto
+ Integer32Proto
+ Integer64Proto
+ BoolProto
+ DoubleProto
+ BytesProto
+ VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 000000000..56cd7a3ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go
new file mode 100644
index 000000000..8705ec348
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto
+// DO NOT EDIT!
+
+/*
+Package blobstore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/blobstore/blobstore_service.proto
+
+It has these top-level messages:
+ BlobstoreServiceError
+ CreateUploadURLRequest
+ CreateUploadURLResponse
+ DeleteBlobRequest
+ FetchDataRequest
+ FetchDataResponse
+ CloneBlobRequest
+ CloneBlobResponse
+ DecodeBlobKeyRequest
+ DecodeBlobKeyResponse
+ CreateEncodedGoogleStorageKeyRequest
+ CreateEncodedGoogleStorageKeyResponse
+*/
+package blobstore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type BlobstoreServiceError_ErrorCode int32
+
+const (
+ BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0
+ BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1
+ BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2
+ BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3
+ BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4
+ BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5
+ BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6
+ BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8
+ BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9
+)
+
+var BlobstoreServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "URL_TOO_LONG",
+ 3: "PERMISSION_DENIED",
+ 4: "BLOB_NOT_FOUND",
+ 5: "DATA_INDEX_OUT_OF_RANGE",
+ 6: "BLOB_FETCH_SIZE_TOO_LARGE",
+ 8: "ARGUMENT_OUT_OF_RANGE",
+ 9: "INVALID_BLOB_KEY",
+}
+var BlobstoreServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "URL_TOO_LONG": 2,
+ "PERMISSION_DENIED": 3,
+ "BLOB_NOT_FOUND": 4,
+ "DATA_INDEX_OUT_OF_RANGE": 5,
+ "BLOB_FETCH_SIZE_TOO_LARGE": 6,
+ "ARGUMENT_OUT_OF_RANGE": 8,
+ "INVALID_BLOB_KEY": 9,
+}
+
+func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode {
+ p := new(BlobstoreServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x BlobstoreServiceError_ErrorCode) String() string {
+ return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x))
+}
+func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = BlobstoreServiceError_ErrorCode(value)
+ return nil
+}
+
+type BlobstoreServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} }
+func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) }
+func (*BlobstoreServiceError) ProtoMessage() {}
+
+type CreateUploadURLRequest struct {
+ SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"`
+ MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"`
+ MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"`
+ GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"`
+ UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} }
+func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateUploadURLRequest) ProtoMessage() {}
+
+func (m *CreateUploadURLRequest) GetSuccessPath() string {
+ if m != nil && m.SuccessPath != nil {
+ return *m.SuccessPath
+ }
+ return ""
+}
+
+func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 {
+ if m != nil && m.MaxUploadSizeBytes != nil {
+ return *m.MaxUploadSizeBytes
+ }
+ return 0
+}
+
+func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 {
+ if m != nil && m.MaxUploadSizePerBlobBytes != nil {
+ return *m.MaxUploadSizePerBlobBytes
+ }
+ return 0
+}
+
+func (m *CreateUploadURLRequest) GetGsBucketName() string {
+ if m != nil && m.GsBucketName != nil {
+ return *m.GsBucketName
+ }
+ return ""
+}
+
+func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 {
+ if m != nil && m.UrlExpiryTimeSeconds != nil {
+ return *m.UrlExpiryTimeSeconds
+ }
+ return 0
+}
+
+type CreateUploadURLResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} }
+func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateUploadURLResponse) ProtoMessage() {}
+
+func (m *CreateUploadURLResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type DeleteBlobRequest struct {
+ BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"`
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} }
+func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteBlobRequest) ProtoMessage() {}
+
+func (m *DeleteBlobRequest) GetBlobKey() []string {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+func (m *DeleteBlobRequest) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+type FetchDataRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"`
+ EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} }
+func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) }
+func (*FetchDataRequest) ProtoMessage() {}
+
+func (m *FetchDataRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *FetchDataRequest) GetStartIndex() int64 {
+ if m != nil && m.StartIndex != nil {
+ return *m.StartIndex
+ }
+ return 0
+}
+
+func (m *FetchDataRequest) GetEndIndex() int64 {
+ if m != nil && m.EndIndex != nil {
+ return *m.EndIndex
+ }
+ return 0
+}
+
+type FetchDataResponse struct {
+ Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} }
+func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) }
+func (*FetchDataResponse) ProtoMessage() {}
+
+func (m *FetchDataResponse) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type CloneBlobRequest struct {
+ BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"`
+ TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} }
+func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) }
+func (*CloneBlobRequest) ProtoMessage() {}
+
+func (m *CloneBlobRequest) GetBlobKey() []byte {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+func (m *CloneBlobRequest) GetMimeType() []byte {
+ if m != nil {
+ return m.MimeType
+ }
+ return nil
+}
+
+func (m *CloneBlobRequest) GetTargetAppId() []byte {
+ if m != nil {
+ return m.TargetAppId
+ }
+ return nil
+}
+
+type CloneBlobResponse struct {
+ BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} }
+func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) }
+func (*CloneBlobResponse) ProtoMessage() {}
+
+func (m *CloneBlobResponse) GetBlobKey() []byte {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+type DecodeBlobKeyRequest struct {
+ BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} }
+func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) }
+func (*DecodeBlobKeyRequest) ProtoMessage() {}
+
+func (m *DecodeBlobKeyRequest) GetBlobKey() []string {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+type DecodeBlobKeyResponse struct {
+ Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} }
+func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) }
+func (*DecodeBlobKeyResponse) ProtoMessage() {}
+
+func (m *DecodeBlobKeyResponse) GetDecoded() []string {
+ if m != nil {
+ return m.Decoded
+ }
+ return nil
+}
+
+type CreateEncodedGoogleStorageKeyRequest struct {
+ Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} }
+func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {}
+
+func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string {
+ if m != nil && m.Filename != nil {
+ return *m.Filename
+ }
+ return ""
+}
+
+type CreateEncodedGoogleStorageKeyResponse struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} }
+func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {}
+
+func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto
new file mode 100644
index 000000000..33b265032
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto
@@ -0,0 +1,71 @@
+syntax = "proto2";
+option go_package = "blobstore";
+
+package appengine;
+
+message BlobstoreServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ URL_TOO_LONG = 2;
+ PERMISSION_DENIED = 3;
+ BLOB_NOT_FOUND = 4;
+ DATA_INDEX_OUT_OF_RANGE = 5;
+ BLOB_FETCH_SIZE_TOO_LARGE = 6;
+ ARGUMENT_OUT_OF_RANGE = 8;
+ INVALID_BLOB_KEY = 9;
+ }
+}
+
+message CreateUploadURLRequest {
+ required string success_path = 1;
+ optional int64 max_upload_size_bytes = 2;
+ optional int64 max_upload_size_per_blob_bytes = 3;
+ optional string gs_bucket_name = 4;
+ optional int32 url_expiry_time_seconds = 5;
+}
+
+message CreateUploadURLResponse {
+ required string url = 1;
+}
+
+message DeleteBlobRequest {
+ repeated string blob_key = 1;
+ optional string token = 2;
+}
+
+message FetchDataRequest {
+ required string blob_key = 1;
+ required int64 start_index = 2;
+ required int64 end_index = 3;
+}
+
+message FetchDataResponse {
+ required bytes data = 1000 [ctype = CORD];
+}
+
+message CloneBlobRequest {
+ required bytes blob_key = 1;
+ required bytes mime_type = 2;
+ required bytes target_app_id = 3;
+}
+
+message CloneBlobResponse {
+ required bytes blob_key = 1;
+}
+
+message DecodeBlobKeyRequest {
+ repeated string blob_key = 1;
+}
+
+message DecodeBlobKeyResponse {
+ repeated string decoded = 1;
+}
+
+message CreateEncodedGoogleStorageKeyRequest {
+ required string filename = 1;
+}
+
+message CreateEncodedGoogleStorageKeyResponse {
+ required string blob_key = 1;
+}
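These messages back the public blobstore package's wire calls. The sketch below shows a FetchData round trip under the assumption that the service and method are named "blobstore" and "FetchData", as in the public package; the helper itself is illustrative.

package blobstore

import (
	netcontext "golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/blobstore"
)

// fetchRange is an illustrative helper; the "blobstore"/"FetchData" names are
// assumed from the public blobstore package.
func fetchRange(ctx netcontext.Context, blobKey string, start, end int64) ([]byte, error) {
	req := &pb.FetchDataRequest{
		BlobKey:    &blobKey,
		StartIndex: &start,
		EndIndex:   &end,
	}
	res := &pb.FetchDataResponse{}
	if err := internal.Call(ctx, "blobstore", "FetchData", req, res); err != nil {
		return nil, err
	}
	return res.GetData(), nil
}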
diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go
new file mode 100644
index 000000000..173636400
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go
@@ -0,0 +1,125 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/capability/capability_service.proto
+// DO NOT EDIT!
+
+/*
+Package capability is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/capability/capability_service.proto
+
+It has these top-level messages:
+ IsEnabledRequest
+ IsEnabledResponse
+*/
+package capability
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type IsEnabledResponse_SummaryStatus int32
+
+const (
+ IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0
+ IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1
+ IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2
+ IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3
+ IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4
+ IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5
+)
+
+var IsEnabledResponse_SummaryStatus_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "ENABLED",
+ 2: "SCHEDULED_FUTURE",
+ 3: "SCHEDULED_NOW",
+ 4: "DISABLED",
+ 5: "UNKNOWN",
+}
+var IsEnabledResponse_SummaryStatus_value = map[string]int32{
+ "DEFAULT": 0,
+ "ENABLED": 1,
+ "SCHEDULED_FUTURE": 2,
+ "SCHEDULED_NOW": 3,
+ "DISABLED": 4,
+ "UNKNOWN": 5,
+}
+
+func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus {
+ p := new(IsEnabledResponse_SummaryStatus)
+ *p = x
+ return p
+}
+func (x IsEnabledResponse_SummaryStatus) String() string {
+ return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x))
+}
+func (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus")
+ if err != nil {
+ return err
+ }
+ *x = IsEnabledResponse_SummaryStatus(value)
+ return nil
+}
+
+type IsEnabledRequest struct {
+ Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"`
+ Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"`
+ Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} }
+func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) }
+func (*IsEnabledRequest) ProtoMessage() {}
+
+func (m *IsEnabledRequest) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *IsEnabledRequest) GetCapability() []string {
+ if m != nil {
+ return m.Capability
+ }
+ return nil
+}
+
+func (m *IsEnabledRequest) GetCall() []string {
+ if m != nil {
+ return m.Call
+ }
+ return nil
+}
+
+type IsEnabledResponse struct {
+ SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"`
+ TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} }
+func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) }
+func (*IsEnabledResponse) ProtoMessage() {}
+
+func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus {
+ if m != nil && m.SummaryStatus != nil {
+ return *m.SummaryStatus
+ }
+ return IsEnabledResponse_DEFAULT
+}
+
+func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 {
+ if m != nil && m.TimeUntilScheduled != nil {
+ return *m.TimeUntilScheduled
+ }
+ return 0
+}
diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto
new file mode 100644
index 000000000..5660ab6ee
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto
@@ -0,0 +1,28 @@
+syntax = "proto2";
+option go_package = "capability";
+
+package appengine;
+
+message IsEnabledRequest {
+ required string package = 1;
+ repeated string capability = 2;
+ repeated string call = 3;
+}
+
+message IsEnabledResponse {
+ enum SummaryStatus {
+ DEFAULT = 0;
+ ENABLED = 1;
+ SCHEDULED_FUTURE = 2;
+ SCHEDULED_NOW = 3;
+ DISABLED = 4;
+ UNKNOWN = 5;
+ }
+ optional SummaryStatus summary_status = 1;
+
+ optional int64 time_until_scheduled = 2;
+}
+
+service CapabilityService {
+ rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {};
+}
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
new file mode 100644
index 000000000..7b8d00c98
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
@@ -0,0 +1,154 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/channel/channel_service.proto
+// DO NOT EDIT!
+
+/*
+Package channel is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/channel/channel_service.proto
+
+It has these top-level messages:
+ ChannelServiceError
+ CreateChannelRequest
+ CreateChannelResponse
+ SendMessageRequest
+*/
+package channel
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ChannelServiceError_ErrorCode int32
+
+const (
+ ChannelServiceError_OK ChannelServiceError_ErrorCode = 0
+ ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1
+ ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2
+ ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3
+ ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4
+ ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5
+)
+
+var ChannelServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "INVALID_CHANNEL_KEY",
+ 3: "BAD_MESSAGE",
+ 4: "INVALID_CHANNEL_TOKEN_DURATION",
+ 5: "APPID_ALIAS_REQUIRED",
+}
+var ChannelServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "INVALID_CHANNEL_KEY": 2,
+ "BAD_MESSAGE": 3,
+ "INVALID_CHANNEL_TOKEN_DURATION": 4,
+ "APPID_ALIAS_REQUIRED": 5,
+}
+
+func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode {
+ p := new(ChannelServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ChannelServiceError_ErrorCode) String() string {
+ return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x))
+}
+func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ChannelServiceError_ErrorCode(value)
+ return nil
+}
+
+type ChannelServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} }
+func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) }
+func (*ChannelServiceError) ProtoMessage() {}
+
+type CreateChannelRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} }
+func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelRequest) ProtoMessage() {}
+
+func (m *CreateChannelRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *CreateChannelRequest) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type CreateChannelResponse struct {
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} }
+func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelResponse) ProtoMessage() {}
+
+func (m *CreateChannelResponse) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+func (m *CreateChannelResponse) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type SendMessageRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} }
+func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*SendMessageRequest) ProtoMessage() {}
+
+func (m *SendMessageRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *SendMessageRequest) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
new file mode 100644
index 000000000..2b5a918ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
@@ -0,0 +1,30 @@
+syntax = "proto2";
+option go_package = "channel";
+
+package appengine;
+
+message ChannelServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ INVALID_CHANNEL_KEY = 2;
+ BAD_MESSAGE = 3;
+ INVALID_CHANNEL_TOKEN_DURATION = 4;
+ APPID_ALIAS_REQUIRED = 5;
+ }
+}
+
+message CreateChannelRequest {
+ required string application_key = 1;
+ optional int32 duration_minutes = 2;
+}
+
+message CreateChannelResponse {
+ optional string token = 2;
+ optional int32 duration_minutes = 3;
+}
+
+message SendMessageRequest {
+ required string application_key = 1;
+ required string message = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 000000000..8613cb731
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2778 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+ Action
+ PropertyValue
+ Property
+ Path
+ Reference
+ User
+ EntityProto
+ CompositeProperty
+ Index
+ CompositeIndex
+ IndexPostfix
+ IndexPosition
+ Snapshot
+ InternalHeader
+ Transaction
+ Query
+ CompiledQuery
+ CompiledCursor
+ Cursor
+ Error
+ Cost
+ GetRequest
+ GetResponse
+ PutRequest
+ PutResponse
+ TouchRequest
+ TouchResponse
+ DeleteRequest
+ DeleteResponse
+ NextRequest
+ QueryResult
+ AllocateIdsRequest
+ AllocateIdsResponse
+ CompositeIndices
+ AddActionsRequest
+ AddActionsResponse
+ BeginTransactionRequest
+ CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+
+type Action struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+}
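Every accessor in the generated file above follows the same defensive shape: check that the receiver is non-nil, check that the pointer field is set, and otherwise return the zero value or the matching Default_* constant. That is what makes chained access through optional sub-messages safe even when nothing has been populated. A minimal sketch of the idea, using hypothetical stand-in types rather than anything from this package:

package main

import "fmt"

// Stand-ins mirroring the generated getter pattern; these are illustrative
// types, not the appengine ones.
type cost struct{ IndexWrites *int32 }

func (c *cost) GetIndexWrites() int32 {
	if c != nil && c.IndexWrites != nil {
		return *c.IndexWrites
	}
	return 0
}

type putResponse struct{ Cost *cost }

func (p *putResponse) GetCost() *cost {
	if p != nil {
		return p.Cost
	}
	return nil
}

func main() {
	var resp *putResponse // nil message, nothing populated
	// Safe: GetCost returns nil, and GetIndexWrites tolerates a nil receiver.
	fmt.Println(resp.GetCost().GetIndexWrites()) // prints 0
}

The real PutResponse, Cost and GetResponse types above behave the same way.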
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 000000000..e76f126ff
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
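Because this is a proto2 schema, optional scalar fields carry explicit presence: in the generated Go above each one becomes a pointer, where nil means unset, and a [default = ...] clause becomes a Default_* constant that the getter reports when the field is absent (allow_deferred = 5 [default=false], for instance, pairs with Default_GetRequest_AllowDeferred). A short illustration of unset versus explicitly set, again with a stand-in struct rather than the generated type:

package main

import "fmt"

// Stand-in for a proto2 message with
//   optional bool allow_deferred = 5 [default = false];
type getRequest struct{ AllowDeferred *bool }

const defaultAllowDeferred = false // mirrors Default_GetRequest_AllowDeferred

func (r *getRequest) GetAllowDeferred() bool {
	if r != nil && r.AllowDeferred != nil {
		return *r.AllowDeferred // explicitly set, even if set to false
	}
	return defaultAllowDeferred // unset: report the declared default
}

func main() {
	v := true
	unset := &getRequest{}
	set := &getRequest{AllowDeferred: &v}
	fmt.Println(unset.GetAllowDeferred(), set.GetAllowDeferred()) // false true
	// Presence is still observable: unset.AllowDeferred is nil, set.AllowDeferred is not.
}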
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 000000000..d538701ab
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 000000000..b59603f13
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.DefaultVersionHostname(c)
+}
+
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func RequestID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.RequestID(c)
+}
+
+func ModuleName(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.ModuleName(c)
+}
+func VersionID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.VersionID(c)
+}
+
+func fullyQualifiedAppID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return c.FullyQualifiedAppID()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 000000000..d5fa75be7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "net/http"
+ "os"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil
+ }
+ return c.Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ appID := os.Getenv("GAE_LONG_APP_ID")
+ if appID == "" {
+ appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ }
+ return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
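ModuleName, VersionID, InstanceID and partitionlessAppID above all share one lookup order: prefer an environment variable injected by the runtime, and only fall back to the GCE metadata server when it is absent. A generic sketch of that pattern follows; envOrMetadata is a hypothetical helper, not part of this package, and the fetch callback stands in for mustGetMetadata:

package main

import (
	"fmt"
	"os"
)

// envOrMetadata returns the environment variable if it is set, otherwise the
// value obtained from the metadata server (represented by the fetch callback).
func envOrMetadata(envKey, metadataPath string, fetch func(string) []byte) string {
	if s := os.Getenv(envKey); s != "" {
		return s
	}
	return string(fetch(metadataPath))
}

func main() {
	fakeFetch := func(path string) []byte { return []byte("metadata value for " + path) }
	fmt.Println(envOrMetadata("GAE_MODULE_NAME", "instance/attributes/gae_backend_name", fakeFetch))
}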
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
new file mode 100644
index 000000000..ba7c72206
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
@@ -0,0 +1,845 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/image/images_service.proto
+// DO NOT EDIT!
+
+/*
+Package image is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/image/images_service.proto
+
+It has these top-level messages:
+ ImagesServiceError
+ ImagesServiceTransform
+ Transform
+ ImageData
+ InputSettings
+ OutputSettings
+ ImagesTransformRequest
+ ImagesTransformResponse
+ CompositeImageOptions
+ ImagesCanvas
+ ImagesCompositeRequest
+ ImagesCompositeResponse
+ ImagesHistogramRequest
+ ImagesHistogram
+ ImagesHistogramResponse
+ ImagesGetUrlBaseRequest
+ ImagesGetUrlBaseResponse
+ ImagesDeleteUrlBaseRequest
+ ImagesDeleteUrlBaseResponse
+*/
+package image
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ImagesServiceError_ErrorCode int32
+
+const (
+ ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1
+ ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2
+ ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3
+ ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4
+ ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5
+ ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6
+ ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7
+ ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8
+)
+
+var ImagesServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "BAD_TRANSFORM_DATA",
+ 3: "NOT_IMAGE",
+ 4: "BAD_IMAGE_DATA",
+ 5: "IMAGE_TOO_LARGE",
+ 6: "INVALID_BLOB_KEY",
+ 7: "ACCESS_DENIED",
+ 8: "OBJECT_NOT_FOUND",
+}
+var ImagesServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "BAD_TRANSFORM_DATA": 2,
+ "NOT_IMAGE": 3,
+ "BAD_IMAGE_DATA": 4,
+ "IMAGE_TOO_LARGE": 5,
+ "INVALID_BLOB_KEY": 6,
+ "ACCESS_DENIED": 7,
+ "OBJECT_NOT_FOUND": 8,
+}
+
+func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode {
+ p := new(ImagesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ImagesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ImagesServiceTransform_Type int32
+
+const (
+ ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1
+ ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2
+ ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3
+ ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4
+ ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5
+ ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6
+)
+
+var ImagesServiceTransform_Type_name = map[int32]string{
+ 1: "RESIZE",
+ 2: "ROTATE",
+ 3: "HORIZONTAL_FLIP",
+ 4: "VERTICAL_FLIP",
+ 5: "CROP",
+ 6: "IM_FEELING_LUCKY",
+}
+var ImagesServiceTransform_Type_value = map[string]int32{
+ "RESIZE": 1,
+ "ROTATE": 2,
+ "HORIZONTAL_FLIP": 3,
+ "VERTICAL_FLIP": 4,
+ "CROP": 5,
+ "IM_FEELING_LUCKY": 6,
+}
+
+func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type {
+ p := new(ImagesServiceTransform_Type)
+ *p = x
+ return p
+}
+func (x ImagesServiceTransform_Type) String() string {
+ return proto.EnumName(ImagesServiceTransform_Type_name, int32(x))
+}
+func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceTransform_Type(value)
+ return nil
+}
+
+type InputSettings_ORIENTATION_CORRECTION_TYPE int32
+
+const (
+ InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0
+ InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1
+)
+
+var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{
+ 0: "UNCHANGED_ORIENTATION",
+ 1: "CORRECT_ORIENTATION",
+}
+var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{
+ "UNCHANGED_ORIENTATION": 0,
+ "CORRECT_ORIENTATION": 1,
+}
+
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE {
+ p := new(InputSettings_ORIENTATION_CORRECTION_TYPE)
+ *p = x
+ return p
+}
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string {
+ return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x))
+}
+func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value)
+ return nil
+}
+
+type OutputSettings_MIME_TYPE int32
+
+const (
+ OutputSettings_PNG OutputSettings_MIME_TYPE = 0
+ OutputSettings_JPEG OutputSettings_MIME_TYPE = 1
+ OutputSettings_WEBP OutputSettings_MIME_TYPE = 2
+)
+
+var OutputSettings_MIME_TYPE_name = map[int32]string{
+ 0: "PNG",
+ 1: "JPEG",
+ 2: "WEBP",
+}
+var OutputSettings_MIME_TYPE_value = map[string]int32{
+ "PNG": 0,
+ "JPEG": 1,
+ "WEBP": 2,
+}
+
+func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE {
+ p := new(OutputSettings_MIME_TYPE)
+ *p = x
+ return p
+}
+func (x OutputSettings_MIME_TYPE) String() string {
+ return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x))
+}
+func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = OutputSettings_MIME_TYPE(value)
+ return nil
+}
+
+type CompositeImageOptions_ANCHOR int32
+
+const (
+ CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0
+ CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1
+ CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2
+ CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3
+ CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4
+ CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5
+ CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6
+ CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7
+ CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8
+)
+
+var CompositeImageOptions_ANCHOR_name = map[int32]string{
+ 0: "TOP_LEFT",
+ 1: "TOP",
+ 2: "TOP_RIGHT",
+ 3: "LEFT",
+ 4: "CENTER",
+ 5: "RIGHT",
+ 6: "BOTTOM_LEFT",
+ 7: "BOTTOM",
+ 8: "BOTTOM_RIGHT",
+}
+var CompositeImageOptions_ANCHOR_value = map[string]int32{
+ "TOP_LEFT": 0,
+ "TOP": 1,
+ "TOP_RIGHT": 2,
+ "LEFT": 3,
+ "CENTER": 4,
+ "RIGHT": 5,
+ "BOTTOM_LEFT": 6,
+ "BOTTOM": 7,
+ "BOTTOM_RIGHT": 8,
+}
+
+func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR {
+ p := new(CompositeImageOptions_ANCHOR)
+ *p = x
+ return p
+}
+func (x CompositeImageOptions_ANCHOR) String() string {
+ return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x))
+}
+func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR")
+ if err != nil {
+ return err
+ }
+ *x = CompositeImageOptions_ANCHOR(value)
+ return nil
+}
+
+type ImagesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} }
+func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceError) ProtoMessage() {}
+
+type ImagesServiceTransform struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} }
+func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceTransform) ProtoMessage() {}
+
+type Transform struct {
+ Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
+ CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"`
+ CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"`
+ CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"`
+ Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"`
+ HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"`
+ VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"`
+ CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"`
+ CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"`
+ CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"`
+ CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"`
+ Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"`
+ AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transform) Reset() { *m = Transform{} }
+func (m *Transform) String() string { return proto.CompactTextString(m) }
+func (*Transform) ProtoMessage() {}
+
+const Default_Transform_CropToFit bool = false
+const Default_Transform_CropOffsetX float32 = 0.5
+const Default_Transform_CropOffsetY float32 = 0.5
+const Default_Transform_Rotate int32 = 0
+const Default_Transform_HorizontalFlip bool = false
+const Default_Transform_VerticalFlip bool = false
+const Default_Transform_CropLeftX float32 = 0
+const Default_Transform_CropTopY float32 = 0
+const Default_Transform_CropRightX float32 = 1
+const Default_Transform_CropBottomY float32 = 1
+const Default_Transform_Autolevels bool = false
+const Default_Transform_AllowStretch bool = false
+
+func (m *Transform) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *Transform) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *Transform) GetCropToFit() bool {
+ if m != nil && m.CropToFit != nil {
+ return *m.CropToFit
+ }
+ return Default_Transform_CropToFit
+}
+
+func (m *Transform) GetCropOffsetX() float32 {
+ if m != nil && m.CropOffsetX != nil {
+ return *m.CropOffsetX
+ }
+ return Default_Transform_CropOffsetX
+}
+
+func (m *Transform) GetCropOffsetY() float32 {
+ if m != nil && m.CropOffsetY != nil {
+ return *m.CropOffsetY
+ }
+ return Default_Transform_CropOffsetY
+}
+
+func (m *Transform) GetRotate() int32 {
+ if m != nil && m.Rotate != nil {
+ return *m.Rotate
+ }
+ return Default_Transform_Rotate
+}
+
+func (m *Transform) GetHorizontalFlip() bool {
+ if m != nil && m.HorizontalFlip != nil {
+ return *m.HorizontalFlip
+ }
+ return Default_Transform_HorizontalFlip
+}
+
+func (m *Transform) GetVerticalFlip() bool {
+ if m != nil && m.VerticalFlip != nil {
+ return *m.VerticalFlip
+ }
+ return Default_Transform_VerticalFlip
+}
+
+func (m *Transform) GetCropLeftX() float32 {
+ if m != nil && m.CropLeftX != nil {
+ return *m.CropLeftX
+ }
+ return Default_Transform_CropLeftX
+}
+
+func (m *Transform) GetCropTopY() float32 {
+ if m != nil && m.CropTopY != nil {
+ return *m.CropTopY
+ }
+ return Default_Transform_CropTopY
+}
+
+func (m *Transform) GetCropRightX() float32 {
+ if m != nil && m.CropRightX != nil {
+ return *m.CropRightX
+ }
+ return Default_Transform_CropRightX
+}
+
+func (m *Transform) GetCropBottomY() float32 {
+ if m != nil && m.CropBottomY != nil {
+ return *m.CropBottomY
+ }
+ return Default_Transform_CropBottomY
+}
+
+func (m *Transform) GetAutolevels() bool {
+ if m != nil && m.Autolevels != nil {
+ return *m.Autolevels
+ }
+ return Default_Transform_Autolevels
+}
+
+func (m *Transform) GetAllowStretch() bool {
+ if m != nil && m.AllowStretch != nil {
+ return *m.AllowStretch
+ }
+ return Default_Transform_AllowStretch
+}
+
+type ImageData struct {
+ Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"`
+ BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"`
+ Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImageData) Reset() { *m = ImageData{} }
+func (m *ImageData) String() string { return proto.CompactTextString(m) }
+func (*ImageData) ProtoMessage() {}
+
+func (m *ImageData) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *ImageData) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImageData) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImageData) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+type InputSettings struct {
+ CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"`
+ ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"`
+ TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InputSettings) Reset() { *m = InputSettings{} }
+func (m *InputSettings) String() string { return proto.CompactTextString(m) }
+func (*InputSettings) ProtoMessage() {}
+
+const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION
+const Default_InputSettings_ParseMetadata bool = false
+
+func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE {
+ if m != nil && m.CorrectExifOrientation != nil {
+ return *m.CorrectExifOrientation
+ }
+ return Default_InputSettings_CorrectExifOrientation
+}
+
+func (m *InputSettings) GetParseMetadata() bool {
+ if m != nil && m.ParseMetadata != nil {
+ return *m.ParseMetadata
+ }
+ return Default_InputSettings_ParseMetadata
+}
+
+func (m *InputSettings) GetTransparentSubstitutionRgb() int32 {
+ if m != nil && m.TransparentSubstitutionRgb != nil {
+ return *m.TransparentSubstitutionRgb
+ }
+ return 0
+}
+
+type OutputSettings struct {
+ MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"`
+ Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OutputSettings) Reset() { *m = OutputSettings{} }
+func (m *OutputSettings) String() string { return proto.CompactTextString(m) }
+func (*OutputSettings) ProtoMessage() {}
+
+const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG
+
+func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE {
+ if m != nil && m.MimeType != nil {
+ return *m.MimeType
+ }
+ return Default_OutputSettings_MimeType
+}
+
+func (m *OutputSettings) GetQuality() int32 {
+ if m != nil && m.Quality != nil {
+ return *m.Quality
+ }
+ return 0
+}
+
+type ImagesTransformRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} }
+func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformRequest) ProtoMessage() {}
+
+func (m *ImagesTransformRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetTransform() []*Transform {
+ if m != nil {
+ return m.Transform
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetInput() *InputSettings {
+ if m != nil {
+ return m.Input
+ }
+ return nil
+}
+
+type ImagesTransformResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} }
+func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformResponse) ProtoMessage() {}
+
+func (m *ImagesTransformResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformResponse) GetSourceMetadata() string {
+ if m != nil && m.SourceMetadata != nil {
+ return *m.SourceMetadata
+ }
+ return ""
+}
+
+type CompositeImageOptions struct {
+ SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"`
+ XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"`
+ YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"`
+ Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"`
+ Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} }
+func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) }
+func (*CompositeImageOptions) ProtoMessage() {}
+
+func (m *CompositeImageOptions) GetSourceIndex() int32 {
+ if m != nil && m.SourceIndex != nil {
+ return *m.SourceIndex
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetXOffset() int32 {
+ if m != nil && m.XOffset != nil {
+ return *m.XOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetYOffset() int32 {
+ if m != nil && m.YOffset != nil {
+ return *m.YOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetOpacity() float32 {
+ if m != nil && m.Opacity != nil {
+ return *m.Opacity
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR {
+ if m != nil && m.Anchor != nil {
+ return *m.Anchor
+ }
+ return CompositeImageOptions_TOP_LEFT
+}
+
+type ImagesCanvas struct {
+ Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} }
+func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) }
+func (*ImagesCanvas) ProtoMessage() {}
+
+const Default_ImagesCanvas_Color int32 = -1
+
+func (m *ImagesCanvas) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesCanvas) GetColor() int32 {
+ if m != nil && m.Color != nil {
+ return *m.Color
+ }
+ return Default_ImagesCanvas_Color
+}
+
+type ImagesCompositeRequest struct {
+ Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"`
+ Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} }
+func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeRequest) ProtoMessage() {}
+
+func (m *ImagesCompositeRequest) GetImage() []*ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas {
+ if m != nil {
+ return m.Canvas
+ }
+ return nil
+}
+
+type ImagesCompositeResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} }
+func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeResponse) ProtoMessage() {}
+
+func (m *ImagesCompositeResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogramRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} }
+func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramRequest) ProtoMessage() {}
+
+func (m *ImagesHistogramRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogram struct {
+ Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"`
+ Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"`
+ Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} }
+func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogram) ProtoMessage() {}
+
+func (m *ImagesHistogram) GetRed() []int32 {
+ if m != nil {
+ return m.Red
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetGreen() []int32 {
+ if m != nil {
+ return m.Green
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetBlue() []int32 {
+ if m != nil {
+ return m.Blue
+ }
+ return nil
+}
+
+type ImagesHistogramResponse struct {
+ Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} }
+func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramResponse) ProtoMessage() {}
+
+func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+type ImagesGetUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} }
+func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseRequest) ProtoMessage() {}
+
+const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false
+
+func (m *ImagesGetUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool {
+ if m != nil && m.CreateSecureUrl != nil {
+ return *m.CreateSecureUrl
+ }
+ return Default_ImagesGetUrlBaseRequest_CreateSecureUrl
+}
+
+type ImagesGetUrlBaseResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} }
+func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseResponse) ProtoMessage() {}
+
+func (m *ImagesGetUrlBaseResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} }
+func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {}
+
+func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} }
+func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {}
+
+func init() {
+}
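
The getters above are nil-safe: an unset optional field falls back to its Default_Transform_* constant, so callers read the proto2 defaults without dereferencing the pointer fields. A minimal usage sketch (not from the upstream sources; proto.Int32 and friends are the pointer helpers in github.com/golang/protobuf/proto, and the import path follows the vendor layout shown in the diff headers):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	imagepb "google.golang.org/appengine/internal/image"
)

func main() {
	// Only Width and Rotate are set; every other field stays nil.
	t := &imagepb.Transform{
		Width:  proto.Int32(800),
		Rotate: proto.Int32(90),
	}
	fmt.Println(t.GetWidth())        // 800 (explicitly set)
	fmt.Println(t.GetCropOffsetX())  // 0.5 (Default_Transform_CropOffsetX)
	fmt.Println(t.GetAllowStretch()) // false (Default_Transform_AllowStretch)
}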
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto
new file mode 100644
index 000000000..f0d2ed5d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.proto
@@ -0,0 +1,162 @@
+syntax = "proto2";
+option go_package = "image";
+
+package appengine;
+
+message ImagesServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ BAD_TRANSFORM_DATA = 2;
+ NOT_IMAGE = 3;
+ BAD_IMAGE_DATA = 4;
+ IMAGE_TOO_LARGE = 5;
+ INVALID_BLOB_KEY = 6;
+ ACCESS_DENIED = 7;
+ OBJECT_NOT_FOUND = 8;
+ }
+}
+
+message ImagesServiceTransform {
+ enum Type {
+ RESIZE = 1;
+ ROTATE = 2;
+ HORIZONTAL_FLIP = 3;
+ VERTICAL_FLIP = 4;
+ CROP = 5;
+ IM_FEELING_LUCKY = 6;
+ }
+}
+
+message Transform {
+ optional int32 width = 1;
+ optional int32 height = 2;
+ optional bool crop_to_fit = 11 [default = false];
+ optional float crop_offset_x = 12 [default = 0.5];
+ optional float crop_offset_y = 13 [default = 0.5];
+
+ optional int32 rotate = 3 [default = 0];
+
+ optional bool horizontal_flip = 4 [default = false];
+
+ optional bool vertical_flip = 5 [default = false];
+
+ optional float crop_left_x = 6 [default = 0.0];
+ optional float crop_top_y = 7 [default = 0.0];
+ optional float crop_right_x = 8 [default = 1.0];
+ optional float crop_bottom_y = 9 [default = 1.0];
+
+ optional bool autolevels = 10 [default = false];
+
+ optional bool allow_stretch = 14 [default = false];
+}
+
+message ImageData {
+ required bytes content = 1 [ctype=CORD];
+ optional string blob_key = 2;
+
+ optional int32 width = 3;
+ optional int32 height = 4;
+}
+
+message InputSettings {
+ enum ORIENTATION_CORRECTION_TYPE {
+ UNCHANGED_ORIENTATION = 0;
+ CORRECT_ORIENTATION = 1;
+ }
+ optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1
+ [default=UNCHANGED_ORIENTATION];
+ optional bool parse_metadata = 2 [default=false];
+ optional int32 transparent_substitution_rgb = 3;
+}
+
+message OutputSettings {
+ enum MIME_TYPE {
+ PNG = 0;
+ JPEG = 1;
+ WEBP = 2;
+ }
+
+ optional MIME_TYPE mime_type = 1 [default=PNG];
+ optional int32 quality = 2;
+}
+
+message ImagesTransformRequest {
+ required ImageData image = 1;
+ repeated Transform transform = 2;
+ required OutputSettings output = 3;
+ optional InputSettings input = 4;
+}
+
+message ImagesTransformResponse {
+ required ImageData image = 1;
+ optional string source_metadata = 2;
+}
+
+message CompositeImageOptions {
+ required int32 source_index = 1;
+ required int32 x_offset = 2;
+ required int32 y_offset = 3;
+ required float opacity = 4;
+
+ enum ANCHOR {
+ TOP_LEFT = 0;
+ TOP = 1;
+ TOP_RIGHT = 2;
+ LEFT = 3;
+ CENTER = 4;
+ RIGHT = 5;
+ BOTTOM_LEFT = 6;
+ BOTTOM = 7;
+ BOTTOM_RIGHT = 8;
+ }
+
+ required ANCHOR anchor = 5;
+}
+
+message ImagesCanvas {
+ required int32 width = 1;
+ required int32 height = 2;
+ required OutputSettings output = 3;
+ optional int32 color = 4 [default=-1];
+}
+
+message ImagesCompositeRequest {
+ repeated ImageData image = 1;
+ repeated CompositeImageOptions options = 2;
+ required ImagesCanvas canvas = 3;
+}
+
+message ImagesCompositeResponse {
+ required ImageData image = 1;
+}
+
+message ImagesHistogramRequest {
+ required ImageData image = 1;
+}
+
+message ImagesHistogram {
+ repeated int32 red = 1;
+ repeated int32 green = 2;
+ repeated int32 blue = 3;
+}
+
+message ImagesHistogramResponse {
+ required ImagesHistogram histogram = 1;
+}
+
+message ImagesGetUrlBaseRequest {
+ required string blob_key = 1;
+
+ optional bool create_secure_url = 2 [default = false];
+}
+
+message ImagesGetUrlBaseResponse {
+ required string url = 1;
+}
+
+message ImagesDeleteUrlBaseRequest {
+ required string blob_key = 1;
+}
+
+message ImagesDeleteUrlBaseResponse {
+}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 000000000..051ea3980
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
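
RegisterErrorCodeMap and RegisterTimeoutErrorCode are meant to be called once per service from an init function; after that, APIError.Error can print a symbolic code name and APIError.IsTimeout can classify the failure. A hedged sketch of such a caller (the service name "datastore_v3" and its code numbers are illustrative assumptions, not taken from this diff):

package datastore // hypothetical API implementation package

import "google.golang.org/appengine/internal"

func init() {
	// Give internal.APIError a name table for this service's codes and
	// mark code 5 as a timeout so IsTimeout() reports it as such.
	internal.RegisterErrorCodeMap("datastore_v3", map[int32]string{
		1: "BAD_REQUEST",
		5: "TIMEOUT",
	})
	internal.RegisterTimeoutErrorCode("datastore_v3", 5)
}

With that registration in place, an APIError{Service: "datastore_v3", Code: 5} formats as "API error 5 (datastore_v3: TIMEOUT)" and its IsTimeout() returns true.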
diff --git a/vendor/google.golang.org/appengine/internal/internal_vm_test.go b/vendor/google.golang.org/appengine/internal/internal_vm_test.go
new file mode 100644
index 000000000..f8097616b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal_vm_test.go
@@ -0,0 +1,60 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestInstallingHealthChecker(t *testing.T) {
+ try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) {
+ installHealthChecker(mux)
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ resp, err := http.Get(srv.URL + "/_ah/health")
+ if err != nil {
+ t.Errorf("%s: http.Get: %v", desc, err)
+ return
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("%s: reading body: %v", desc, err)
+ return
+ }
+
+ if resp.StatusCode != wantCode {
+ t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode)
+ return
+ }
+ if wantBody != "" && string(body) != wantBody {
+ t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody)
+ return
+ }
+ }
+
+	// If there are no handlers, or only a root handler, a health checker should be installed.
+ try("empty mux", http.NewServeMux(), 200, "ok")
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "root handler")
+ })
+ try("mux with root handler", mux, 200, "ok")
+
+ // If there's a custom health check handler, one should not be installed.
+ mux = http.NewServeMux()
+ mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(418)
+ io.WriteString(w, "I'm short and stout!")
+ })
+ try("mux with custom health checker", mux, 418, "I'm short and stout!")
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 000000000..20c595be3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,899 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+ LogServiceError
+ UserAppLogLine
+ UserAppLogGroup
+ FlushRequest
+ SetStatusRequest
+ LogOffset
+ LogLine
+ RequestLog
+ LogModuleVersion
+ LogReadRequest
+ LogReadResponse
+ LogUsageRecord
+ LogUsageRequest
+ LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
+
+type LogServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+}
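
UserAppLogGroup and FlushRequest fit together in the obvious way: a batch of UserAppLogLine entries is marshaled as a group and carried as the opaque Logs bytes of a FlushRequest. A sketch under that assumption (the level value and how the runtime actually drains its buffer are not specified by this diff):

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	logpb "google.golang.org/appengine/internal/log"
)

func main() {
	group := &logpb.UserAppLogGroup{
		LogLine: []*logpb.UserAppLogLine{{
			TimestampUsec: proto.Int64(1517264260000000),
			Level:         proto.Int64(3), // assumed to mean "error" here
			Message:       proto.String("image transform failed"),
		}},
	}
	payload, err := proto.Marshal(group)
	if err != nil {
		log.Fatal(err)
	}
	flush := &logpb.FlushRequest{Logs: payload}
	_ = flush // would be handed to the log service's Flush RPC
}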
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 000000000..8981dc475
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default="default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default="default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
new file mode 100644
index 000000000..b8d5f0301
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
@@ -0,0 +1,229 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/mail/mail_service.proto
+// DO NOT EDIT!
+
+/*
+Package mail is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/mail/mail_service.proto
+
+It has these top-level messages:
+ MailServiceError
+ MailAttachment
+ MailHeader
+ MailMessage
+*/
+package mail
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MailServiceError_ErrorCode int32
+
+const (
+ MailServiceError_OK MailServiceError_ErrorCode = 0
+ MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1
+ MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2
+ MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3
+ MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4
+ MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5
+ MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6
+)
+
+var MailServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BAD_REQUEST",
+ 3: "UNAUTHORIZED_SENDER",
+ 4: "INVALID_ATTACHMENT_TYPE",
+ 5: "INVALID_HEADER_NAME",
+ 6: "INVALID_CONTENT_ID",
+}
+var MailServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BAD_REQUEST": 2,
+ "UNAUTHORIZED_SENDER": 3,
+ "INVALID_ATTACHMENT_TYPE": 4,
+ "INVALID_HEADER_NAME": 5,
+ "INVALID_CONTENT_ID": 6,
+}
+
+func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode {
+ p := new(MailServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MailServiceError_ErrorCode) String() string {
+ return proto.EnumName(MailServiceError_ErrorCode_name, int32(x))
+}
+func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MailServiceError_ErrorCode(value)
+ return nil
+}
+
+type MailServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailServiceError) Reset() { *m = MailServiceError{} }
+func (m *MailServiceError) String() string { return proto.CompactTextString(m) }
+func (*MailServiceError) ProtoMessage() {}
+
+type MailAttachment struct {
+ FileName *string `protobuf:"bytes,1,req,name=FileName" json:"FileName,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
+ ContentID *string `protobuf:"bytes,3,opt,name=ContentID" json:"ContentID,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailAttachment) Reset() { *m = MailAttachment{} }
+func (m *MailAttachment) String() string { return proto.CompactTextString(m) }
+func (*MailAttachment) ProtoMessage() {}
+
+func (m *MailAttachment) GetFileName() string {
+ if m != nil && m.FileName != nil {
+ return *m.FileName
+ }
+ return ""
+}
+
+func (m *MailAttachment) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *MailAttachment) GetContentID() string {
+ if m != nil && m.ContentID != nil {
+ return *m.ContentID
+ }
+ return ""
+}
+
+type MailHeader struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailHeader) Reset() { *m = MailHeader{} }
+func (m *MailHeader) String() string { return proto.CompactTextString(m) }
+func (*MailHeader) ProtoMessage() {}
+
+func (m *MailHeader) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MailHeader) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type MailMessage struct {
+ Sender *string `protobuf:"bytes,1,req,name=Sender" json:"Sender,omitempty"`
+ ReplyTo *string `protobuf:"bytes,2,opt,name=ReplyTo" json:"ReplyTo,omitempty"`
+ To []string `protobuf:"bytes,3,rep,name=To" json:"To,omitempty"`
+ Cc []string `protobuf:"bytes,4,rep,name=Cc" json:"Cc,omitempty"`
+ Bcc []string `protobuf:"bytes,5,rep,name=Bcc" json:"Bcc,omitempty"`
+ Subject *string `protobuf:"bytes,6,req,name=Subject" json:"Subject,omitempty"`
+ TextBody *string `protobuf:"bytes,7,opt,name=TextBody" json:"TextBody,omitempty"`
+ HtmlBody *string `protobuf:"bytes,8,opt,name=HtmlBody" json:"HtmlBody,omitempty"`
+ Attachment []*MailAttachment `protobuf:"bytes,9,rep,name=Attachment" json:"Attachment,omitempty"`
+ Header []*MailHeader `protobuf:"bytes,10,rep,name=Header" json:"Header,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailMessage) Reset() { *m = MailMessage{} }
+func (m *MailMessage) String() string { return proto.CompactTextString(m) }
+func (*MailMessage) ProtoMessage() {}
+
+func (m *MailMessage) GetSender() string {
+ if m != nil && m.Sender != nil {
+ return *m.Sender
+ }
+ return ""
+}
+
+func (m *MailMessage) GetReplyTo() string {
+ if m != nil && m.ReplyTo != nil {
+ return *m.ReplyTo
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTo() []string {
+ if m != nil {
+ return m.To
+ }
+ return nil
+}
+
+func (m *MailMessage) GetCc() []string {
+ if m != nil {
+ return m.Cc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetBcc() []string {
+ if m != nil {
+ return m.Bcc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetSubject() string {
+ if m != nil && m.Subject != nil {
+ return *m.Subject
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTextBody() string {
+ if m != nil && m.TextBody != nil {
+ return *m.TextBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetHtmlBody() string {
+ if m != nil && m.HtmlBody != nil {
+ return *m.HtmlBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetAttachment() []*MailAttachment {
+ if m != nil {
+ return m.Attachment
+ }
+ return nil
+}
+
+func (m *MailMessage) GetHeader() []*MailHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.proto b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
new file mode 100644
index 000000000..4e57b7aa5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
@@ -0,0 +1,45 @@
+syntax = "proto2";
+option go_package = "mail";
+
+package appengine;
+
+message MailServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BAD_REQUEST = 2;
+ UNAUTHORIZED_SENDER = 3;
+ INVALID_ATTACHMENT_TYPE = 4;
+ INVALID_HEADER_NAME = 5;
+ INVALID_CONTENT_ID = 6;
+ }
+}
+
+message MailAttachment {
+ required string FileName = 1;
+ required bytes Data = 2;
+ optional string ContentID = 3;
+}
+
+message MailHeader {
+ required string name = 1;
+ required string value = 2;
+}
+
+message MailMessage {
+ required string Sender = 1;
+ optional string ReplyTo = 2;
+
+ repeated string To = 3;
+ repeated string Cc = 4;
+ repeated string Bcc = 5;
+
+ required string Subject = 6;
+
+ optional string TextBody = 7;
+ optional string HtmlBody = 8;
+
+ repeated MailAttachment Attachment = 9;
+
+ repeated MailHeader Header = 10;
+}
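The generated mail types above are plain proto2 structs, so optional and required scalars are pointer fields. A minimal sketch of populating and marshalling a MailMessage, assuming the file lives inside the google.golang.org/appengine tree (the internal/ import path is not visible to outside packages):

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	mailpb "google.golang.org/appengine/internal/mail"
)

func main() {
	// Sender and Subject are required in the .proto, so both must be set
	// before Marshal; proto.String wraps literals into *string.
	msg := &mailpb.MailMessage{
		Sender:   proto.String("sender@example.com"),
		To:       []string{"someone@example.com"},
		Subject:  proto.String("hello"),
		TextBody: proto.String("plain-text body"),
	}
	b, err := proto.Marshal(msg) // wire bytes handed to the mail service RPC
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("marshalled %d bytes", len(b))
}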
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 000000000..49036163c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,15 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine_internal"
+)
+
+func Main() {
+ appengine_internal.Main()
+}
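This classic-runtime Main simply defers to appengine_internal. A sketch of the usual app entry point, assuming the top-level appengine.Main wrapper elsewhere in this vendored package, which dispatches to internal.Main for whichever build tag is active:

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// appengine.Main forwards to internal.Main; the +build tags in main.go
	// and main_vm.go select the classic (appengine_internal.Main) or the
	// standalone ListenAndServe implementation.
	appengine.Main()
}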
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 000000000..822e784a4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+)
+
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ host := ""
+ if IsDevAppServer() {
+ host = "127.0.0.1"
+ }
+ if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
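The mux.Handler probe above asks the mux which pattern would serve GET /_ah/health; the trivial handler is only added when nothing else is registered there. A sketch, with a hypothetical checkDependencies probe, of an app supplying its own health handler so the default is skipped:

package main

import (
	"io"
	"net/http"
)

// checkDependencies is a hypothetical readiness probe.
func checkDependencies() error { return nil }

func main() {
	// Registering /_ah/health first means installHealthChecker's pattern
	// check finds it and does not install the trivial "ok" handler.
	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		if err := checkDependencies(); err != nil {
			http.Error(w, "unhealthy", http.StatusServiceUnavailable)
			return
		}
		io.WriteString(w, "ok")
	})
	// In a real app, appengine.Main() would be called here instead.
	http.ListenAndServe(":8080", nil)
}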
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
new file mode 100644
index 000000000..252fef869
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
@@ -0,0 +1,938 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/memcache/memcache_service.proto
+// DO NOT EDIT!
+
+/*
+Package memcache is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/memcache/memcache_service.proto
+
+It has these top-level messages:
+ MemcacheServiceError
+ AppOverride
+ MemcacheGetRequest
+ MemcacheGetResponse
+ MemcacheSetRequest
+ MemcacheSetResponse
+ MemcacheDeleteRequest
+ MemcacheDeleteResponse
+ MemcacheIncrementRequest
+ MemcacheIncrementResponse
+ MemcacheBatchIncrementRequest
+ MemcacheBatchIncrementResponse
+ MemcacheFlushRequest
+ MemcacheFlushResponse
+ MemcacheStatsRequest
+ MergedNamespaceStats
+ MemcacheStatsResponse
+ MemcacheGrabTailRequest
+ MemcacheGrabTailResponse
+*/
+package memcache
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MemcacheServiceError_ErrorCode int32
+
+const (
+ MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0
+ MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1
+ MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2
+ MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3
+ MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6
+)
+
+var MemcacheServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNSPECIFIED_ERROR",
+ 2: "NAMESPACE_NOT_SET",
+ 3: "PERMISSION_DENIED",
+ 6: "INVALID_VALUE",
+}
+var MemcacheServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNSPECIFIED_ERROR": 1,
+ "NAMESPACE_NOT_SET": 2,
+ "PERMISSION_DENIED": 3,
+ "INVALID_VALUE": 6,
+}
+
+func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode {
+ p := new(MemcacheServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MemcacheServiceError_ErrorCode) String() string {
+ return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))
+}
+func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheServiceError_ErrorCode(value)
+ return nil
+}
+
+type MemcacheSetRequest_SetPolicy int32
+
+const (
+ MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1
+ MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2
+ MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3
+ MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4
+)
+
+var MemcacheSetRequest_SetPolicy_name = map[int32]string{
+ 1: "SET",
+ 2: "ADD",
+ 3: "REPLACE",
+ 4: "CAS",
+}
+var MemcacheSetRequest_SetPolicy_value = map[string]int32{
+ "SET": 1,
+ "ADD": 2,
+ "REPLACE": 3,
+ "CAS": 4,
+}
+
+func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {
+ p := new(MemcacheSetRequest_SetPolicy)
+ *p = x
+ return p
+}
+func (x MemcacheSetRequest_SetPolicy) String() string {
+ return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))
+}
+func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetRequest_SetPolicy(value)
+ return nil
+}
+
+type MemcacheSetResponse_SetStatusCode int32
+
+const (
+ MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1
+ MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2
+ MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3
+ MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4
+)
+
+var MemcacheSetResponse_SetStatusCode_name = map[int32]string{
+ 1: "STORED",
+ 2: "NOT_STORED",
+ 3: "ERROR",
+ 4: "EXISTS",
+}
+var MemcacheSetResponse_SetStatusCode_value = map[string]int32{
+ "STORED": 1,
+ "NOT_STORED": 2,
+ "ERROR": 3,
+ "EXISTS": 4,
+}
+
+func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {
+ p := new(MemcacheSetResponse_SetStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheSetResponse_SetStatusCode) String() string {
+ return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))
+}
+func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetResponse_SetStatusCode(value)
+ return nil
+}
+
+type MemcacheDeleteResponse_DeleteStatusCode int32
+
+const (
+ MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1
+ MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2
+)
+
+var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{
+ 1: "DELETED",
+ 2: "NOT_FOUND",
+}
+var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{
+ "DELETED": 1,
+ "NOT_FOUND": 2,
+}
+
+func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {
+ p := new(MemcacheDeleteResponse_DeleteStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheDeleteResponse_DeleteStatusCode) String() string {
+ return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))
+}
+func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheDeleteResponse_DeleteStatusCode(value)
+ return nil
+}
+
+type MemcacheIncrementRequest_Direction int32
+
+const (
+ MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1
+ MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2
+)
+
+var MemcacheIncrementRequest_Direction_name = map[int32]string{
+ 1: "INCREMENT",
+ 2: "DECREMENT",
+}
+var MemcacheIncrementRequest_Direction_value = map[string]int32{
+ "INCREMENT": 1,
+ "DECREMENT": 2,
+}
+
+func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {
+ p := new(MemcacheIncrementRequest_Direction)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementRequest_Direction) String() string {
+ return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))
+}
+func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementRequest_Direction(value)
+ return nil
+}
+
+type MemcacheIncrementResponse_IncrementStatusCode int32
+
+const (
+ MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1
+ MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2
+ MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3
+)
+
+var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{
+ 1: "OK",
+ 2: "NOT_CHANGED",
+ 3: "ERROR",
+}
+var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{
+ "OK": 1,
+ "NOT_CHANGED": 2,
+ "ERROR": 3,
+}
+
+func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {
+ p := new(MemcacheIncrementResponse_IncrementStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementResponse_IncrementStatusCode) String() string {
+ return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))
+}
+func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementResponse_IncrementStatusCode(value)
+ return nil
+}
+
+type MemcacheServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} }
+func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }
+func (*MemcacheServiceError) ProtoMessage() {}
+
+type AppOverride struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"`
+ IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"`
+ MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"`
+ MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppOverride) Reset() { *m = AppOverride{} }
+func (m *AppOverride) String() string { return proto.CompactTextString(m) }
+func (*AppOverride) ProtoMessage() {}
+
+func (m *AppOverride) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *AppOverride) GetNumMemcachegBackends() int32 {
+ if m != nil && m.NumMemcachegBackends != nil {
+ return *m.NumMemcachegBackends
+ }
+ return 0
+}
+
+func (m *AppOverride) GetIgnoreShardlock() bool {
+ if m != nil && m.IgnoreShardlock != nil {
+ return *m.IgnoreShardlock
+ }
+ return false
+}
+
+func (m *AppOverride) GetMemcachePoolHint() string {
+ if m != nil && m.MemcachePoolHint != nil {
+ return *m.MemcachePoolHint
+ }
+ return ""
+}
+
+func (m *AppOverride) GetMemcacheShardingStrategy() []byte {
+ if m != nil {
+ return m.MemcacheShardingStrategy
+ }
+ return nil
+}
+
+type MemcacheGetRequest struct {
+ Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} }
+func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetRequest) ProtoMessage() {}
+
+func (m *MemcacheGetRequest) GetKey() [][]byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGetRequest) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+func (m *MemcacheGetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGetResponse struct {
+ Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} }
+func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse) ProtoMessage() {}
+
+func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGetResponse_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"`
+ ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} }
+func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGetResponse_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {
+ if m != nil && m.ExpiresInSeconds != nil {
+ return *m.ExpiresInSeconds
+ }
+ return 0
+}
+
+type MemcacheSetRequest struct {
+ Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} }
+func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest) ProtoMessage() {}
+
+func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheSetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheSetRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"`
+ ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"`
+ ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} }
+func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET
+const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0
+
+func (m *MemcacheSetRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {
+ if m != nil && m.SetPolicy != nil {
+ return *m.SetPolicy
+ }
+ return Default_MemcacheSetRequest_Item_SetPolicy
+}
+
+func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return Default_MemcacheSetRequest_Item_ExpirationTime
+}
+
+func (m *MemcacheSetRequest_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+type MemcacheSetResponse struct {
+ SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} }
+func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetResponse) ProtoMessage() {}
+
+func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {
+ if m != nil {
+ return m.SetStatus
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest struct {
+ Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} }
+func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest) ProtoMessage() {}
+
+func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheDeleteRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} }
+func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0
+
+func (m *MemcacheDeleteRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {
+ if m != nil && m.DeleteTime != nil {
+ return *m.DeleteTime
+ }
+ return Default_MemcacheDeleteRequest_Item_DeleteTime
+}
+
+type MemcacheDeleteResponse struct {
+ DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} }
+func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteResponse) ProtoMessage() {}
+
+func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {
+ if m != nil {
+ return m.DeleteStatus
+ }
+ return nil
+}
+
+type MemcacheIncrementRequest struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"`
+ Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"`
+ InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"`
+ InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"`
+ Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} }
+func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementRequest) ProtoMessage() {}
+
+const Default_MemcacheIncrementRequest_Delta uint64 = 1
+const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT
+
+func (m *MemcacheIncrementRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheIncrementRequest) GetDelta() uint64 {
+ if m != nil && m.Delta != nil {
+ return *m.Delta
+ }
+ return Default_MemcacheIncrementRequest_Delta
+}
+
+func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_MemcacheIncrementRequest_Direction
+}
+
+func (m *MemcacheIncrementRequest) GetInitialValue() uint64 {
+ if m != nil && m.InitialValue != nil {
+ return *m.InitialValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {
+ if m != nil && m.InitialFlags != nil {
+ return *m.InitialFlags
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheIncrementResponse struct {
+ NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"`
+ IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} }
+func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheIncrementResponse) GetNewValue() uint64 {
+ if m != nil && m.NewValue != nil {
+ return *m.NewValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {
+ if m != nil && m.IncrementStatus != nil {
+ return *m.IncrementStatus
+ }
+ return MemcacheIncrementResponse_OK
+}
+
+type MemcacheBatchIncrementRequest struct {
+ NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} }
+func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementRequest) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheBatchIncrementResponse struct {
+ Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} }
+func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheFlushRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} }
+func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushRequest) ProtoMessage() {}
+
+func (m *MemcacheFlushRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheFlushResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} }
+func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushResponse) ProtoMessage() {}
+
+type MemcacheStatsRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} }
+func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsRequest) ProtoMessage() {}
+
+func (m *MemcacheStatsRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MergedNamespaceStats struct {
+ Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"`
+ Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"`
+ ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"`
+ Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"`
+ Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"`
+ OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} }
+func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }
+func (*MergedNamespaceStats) ProtoMessage() {}
+
+func (m *MergedNamespaceStats) GetHits() uint64 {
+ if m != nil && m.Hits != nil {
+ return *m.Hits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetMisses() uint64 {
+ if m != nil && m.Misses != nil {
+ return *m.Misses
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetByteHits() uint64 {
+ if m != nil && m.ByteHits != nil {
+ return *m.ByteHits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetItems() uint64 {
+ if m != nil && m.Items != nil {
+ return *m.Items
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetBytes() uint64 {
+ if m != nil && m.Bytes != nil {
+ return *m.Bytes
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetOldestItemAge() uint32 {
+ if m != nil && m.OldestItemAge != nil {
+ return *m.OldestItemAge
+ }
+ return 0
+}
+
+type MemcacheStatsResponse struct {
+ Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} }
+func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsResponse) ProtoMessage() {}
+
+func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {
+ if m != nil {
+ return m.Stats
+ }
+ return nil
+}
+
+type MemcacheGrabTailRequest struct {
+ ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} }
+func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailRequest) ProtoMessage() {}
+
+func (m *MemcacheGrabTailRequest) GetItemCount() int32 {
+ if m != nil && m.ItemCount != nil {
+ return *m.ItemCount
+ }
+ return 0
+}
+
+func (m *MemcacheGrabTailRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse struct {
+ Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} }
+func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse_Item struct {
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} }
+func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func init() {
+}
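The generated getters return the proto2 defaults declared in the .proto file when a field is unset. A short sketch building a MemcacheSetRequest with one Item group and relying on those defaults (again assuming the code compiles inside the google.golang.org/appengine tree, since internal/ is not importable elsewhere):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/memcache"
)

func main() {
	// One Item group; SetPolicy and ExpirationTime are left unset, so the
	// getters fall back to the generated defaults (SET and 0).
	req := &pb.MemcacheSetRequest{
		NameSpace: proto.String(""),
		Item: []*pb.MemcacheSetRequest_Item{{
			Key:   []byte("greeting"),
			Value: []byte("hello"),
		}},
	}
	fmt.Println(req.Item[0].GetSetPolicy())      // SET
	fmt.Println(req.Item[0].GetExpirationTime()) // 0
}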
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
new file mode 100644
index 000000000..5f0edcdc7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
@@ -0,0 +1,165 @@
+syntax = "proto2";
+option go_package = "memcache";
+
+package appengine;
+
+message MemcacheServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNSPECIFIED_ERROR = 1;
+ NAMESPACE_NOT_SET = 2;
+ PERMISSION_DENIED = 3;
+ INVALID_VALUE = 6;
+ }
+}
+
+message AppOverride {
+ required string app_id = 1;
+
+ optional int32 num_memcacheg_backends = 2 [deprecated=true];
+ optional bool ignore_shardlock = 3 [deprecated=true];
+ optional string memcache_pool_hint = 4 [deprecated=true];
+ optional bytes memcache_sharding_strategy = 5 [deprecated=true];
+}
+
+message MemcacheGetRequest {
+ repeated bytes key = 1;
+ optional string name_space = 2 [default = ""];
+ optional bool for_cas = 4;
+ optional AppOverride override = 5;
+}
+
+message MemcacheGetResponse {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+ optional fixed32 flags = 4;
+ optional fixed64 cas_id = 5;
+ optional int32 expires_in_seconds = 6;
+ }
+}
+
+message MemcacheSetRequest {
+ enum SetPolicy {
+ SET = 1;
+ ADD = 2;
+ REPLACE = 3;
+ CAS = 4;
+ }
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+
+ optional fixed32 flags = 4;
+ optional SetPolicy set_policy = 5 [default = SET];
+ optional fixed32 expiration_time = 6 [default = 0];
+
+ optional fixed64 cas_id = 8;
+ optional bool for_cas = 9;
+ }
+ optional string name_space = 7 [default = ""];
+ optional AppOverride override = 10;
+}
+
+message MemcacheSetResponse {
+ enum SetStatusCode {
+ STORED = 1;
+ NOT_STORED = 2;
+ ERROR = 3;
+ EXISTS = 4;
+ }
+ repeated SetStatusCode set_status = 1;
+}
+
+message MemcacheDeleteRequest {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ optional fixed32 delete_time = 3 [default = 0];
+ }
+ optional string name_space = 4 [default = ""];
+ optional AppOverride override = 5;
+}
+
+message MemcacheDeleteResponse {
+ enum DeleteStatusCode {
+ DELETED = 1;
+ NOT_FOUND = 2;
+ }
+ repeated DeleteStatusCode delete_status = 1;
+}
+
+message MemcacheIncrementRequest {
+ enum Direction {
+ INCREMENT = 1;
+ DECREMENT = 2;
+ }
+ required bytes key = 1;
+ optional string name_space = 4 [default = ""];
+
+ optional uint64 delta = 2 [default = 1];
+ optional Direction direction = 3 [default = INCREMENT];
+
+ optional uint64 initial_value = 5;
+ optional fixed32 initial_flags = 6;
+ optional AppOverride override = 7;
+}
+
+message MemcacheIncrementResponse {
+ enum IncrementStatusCode {
+ OK = 1;
+ NOT_CHANGED = 2;
+ ERROR = 3;
+ }
+
+ optional uint64 new_value = 1;
+ optional IncrementStatusCode increment_status = 2;
+}
+
+message MemcacheBatchIncrementRequest {
+ optional string name_space = 1 [default = ""];
+ repeated MemcacheIncrementRequest item = 2;
+ optional AppOverride override = 3;
+}
+
+message MemcacheBatchIncrementResponse {
+ repeated MemcacheIncrementResponse item = 1;
+}
+
+message MemcacheFlushRequest {
+ optional AppOverride override = 1;
+}
+
+message MemcacheFlushResponse {
+}
+
+message MemcacheStatsRequest {
+ optional AppOverride override = 1;
+}
+
+message MergedNamespaceStats {
+ required uint64 hits = 1;
+ required uint64 misses = 2;
+ required uint64 byte_hits = 3;
+
+ required uint64 items = 4;
+ required uint64 bytes = 5;
+
+ required fixed32 oldest_item_age = 6;
+}
+
+message MemcacheStatsResponse {
+ optional MergedNamespaceStats stats = 1;
+}
+
+message MemcacheGrabTailRequest {
+ required int32 item_count = 1;
+ optional string name_space = 2 [default = ""];
+ optional AppOverride override = 3;
+}
+
+message MemcacheGrabTailResponse {
+ repeated group Item = 1 {
+ required bytes value = 2;
+ optional fixed32 flags = 3;
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 000000000..9cc1f71d1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "Metadata-Flavor": []string{"Google"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ log.Fatalf("Metadata fetch failed: %v", err)
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
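getMetadata issues a plain GET against the metadata host with the mandatory Metadata-Flavor header. An equivalent standalone sketch for a real key, project/project-id, which only gets an answer when run on a Google Cloud VM:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Same request shape as getMetadata("project/project-id") above; without
	// the Metadata-Flavor header the server rejects the call.
	req, err := http.NewRequest("GET", "http://metadata/computeMetadata/v1/project/project-id", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Metadata-Flavor", "Google")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("metadata fetch failed (expected off-GCE):", err)
		return
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(b))
}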
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 000000000..a0145ed31
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+ ModulesServiceError
+ GetModulesRequest
+ GetModulesResponse
+ GetVersionsRequest
+ GetVersionsResponse
+ GetDefaultVersionRequest
+ GetDefaultVersionResponse
+ GetNumInstancesRequest
+ GetNumInstancesResponse
+ SetNumInstancesRequest
+ SetNumInstancesResponse
+ StartModuleRequest
+ StartModuleResponse
+ StopModuleRequest
+ StopModuleResponse
+ GetHostnameRequest
+ GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+ ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
+ ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
+ ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
+ ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+ ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
+ ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_MODULE",
+ 2: "INVALID_VERSION",
+ 3: "INVALID_INSTANCES",
+ 4: "TRANSIENT_ERROR",
+ 5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_MODULE": 1,
+ "INVALID_VERSION": 2,
+ "INVALID_INSTANCES": 3,
+ "TRANSIENT_ERROR": 4,
+ "UNEXPECTED_STATE": 5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+ p := new(ModulesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ModulesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ModulesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+
+type GetModulesRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+
+type GetModulesResponse struct {
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+
+func (m *GetModulesResponse) GetModule() []string {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+type GetVersionsRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+
+func (m *GetVersionsRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetVersionsResponse struct {
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type GetDefaultVersionRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetDefaultVersionResponse struct {
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesResponse struct {
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+
+type StartModuleRequest struct {
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+
+func (m *StartModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StartModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+
+type StopModuleRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+
+func (m *StopModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StopModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+
+type GetHostnameRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+
+func (m *GetHostnameRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+ if m != nil && m.Instance != nil {
+ return *m.Instance
+ }
+ return ""
+}
+
+type GetHostnameResponse struct {
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+ if m != nil && m.Hostname != nil {
+ return *m.Hostname
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 000000000..d29f0065a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_MODULE = 1;
+ INVALID_VERSION = 2;
+ INVALID_INSTANCES = 3;
+ TRANSIENT_ERROR = 4;
+ UNEXPECTED_STATE = 5;
+ }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+ repeated string module = 1;
+}
+
+message GetVersionsRequest {
+ optional string module = 1;
+}
+
+message GetVersionsResponse {
+ repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+ optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+ required string version = 1;
+}
+
+message GetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+ required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+ required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+ required string module = 1;
+ required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+ optional string module = 1;
+ optional string version = 2;
+ optional string instance = 3;
+}
+
+message GetHostnameResponse {
+ required string hostname = 1;
+}
+
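A brief sketch of filling in one of the generated modules requests; every field here is optional, so the getters return empty strings for anything left unset (same internal-path caveat as above):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/modules"
)

func main() {
	// Module and Version set explicitly; Instance left unset.
	req := &pb.GetHostnameRequest{
		Module:  proto.String("default"),
		Version: proto.String("v1"),
	}
	fmt.Println(req.GetModule(), req.GetVersion(), req.GetInstance() == "")
}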
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 000000000..3b94cf0c6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ close sync.Once
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ defer lc.close.Do(func() {
+ limitRelease()
+ runtime.SetFinalizer(lc, nil)
+ })
+ return lc.Conn.Close()
+}
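The dialer above is a counting semaphore built from a buffered channel, with sync.Once guaranteeing a single release per connection. A condensed standalone sketch of the same pattern wired into an http.Transport (names are illustrative, not the vendored ones):

package main

import (
	"net"
	"net/http"
	"sync"
	"time"
)

// sem caps concurrent outbound connections; 100 mirrors the vendored limit.
var sem = make(chan struct{}, 100)

type limitedConn struct {
	once sync.Once
	net.Conn
}

func (c *limitedConn) Close() error {
	// Release the slot exactly once, even if Close is called repeatedly.
	defer c.once.Do(func() { <-sem })
	return c.Conn.Close()
}

func dialLimited(network, addr string) (net.Conn, error) {
	sem <- struct{}{} // acquire a slot before dialing
	conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
	if err != nil {
		<-sem // dial failed, give the slot back
		return nil, err
	}
	return &limitedConn{Conn: conn}, nil
}

func main() {
	// Hypothetical wiring: cap concurrent connections made by an HTTP client.
	client := &http.Client{Transport: &http.Transport{Dial: dialLimited}}
	_ = client
}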
diff --git a/vendor/google.golang.org/appengine/internal/net_test.go b/vendor/google.golang.org/appengine/internal/net_test.go
new file mode 100644
index 000000000..24da8bb2b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net_test.go
@@ -0,0 +1,58 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+)
+
+func TestDialLimit(t *testing.T) {
+ // Fill up semaphore with false acquisitions to permit only two TCP connections at a time.
+ // We don't replace limitSem because that results in a data race when net/http lazily closes connections.
+ nFake := cap(limitSem) - 2
+ for i := 0; i < nFake; i++ {
+ limitSem <- 1
+ }
+ defer func() {
+ for i := 0; i < nFake; i++ {
+ <-limitSem
+ }
+ }()
+
+ f, c, cleanup := setup() // setup is in api_test.go
+ defer cleanup()
+ f.hang = make(chan int)
+
+ // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed),
+ // then the simple Non200 RPC should hang.
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ Call(toContext(c), "errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{})
+ }()
+ }
+ time.Sleep(50 * time.Millisecond) // let those two RPCs start
+
+ ctx, _ := netcontext.WithTimeout(toContext(c), 50*time.Millisecond)
+ err := Call(ctx, "errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{})
+ if err != errTimeout {
+ t.Errorf("Non200 RPC returned with err %v, want errTimeout", err)
+ }
+
+ // Drain the two RunSlowly calls.
+ f.hang <- 1
+ f.hang <- 1
+ wg.Wait()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 000000000..2fdb546a6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+ echo 1>&2 "* $dir"
+ protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 000000000..526bd39e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+ Request
+ ApplicationError
+ RpcError
+ Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+}
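
The generated types above carry a serialized API call across the remote_api wire format: the caller wraps an inner, service-specific request in a Request, and the reply comes back as a Response holding either the inner payload, an ApplicationError, or an RpcError. Below is a hypothetical sketch of that round trip, assuming the generated package is imported as pb; the HTTP transport and the inner request bytes are elided.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "google.golang.org/appengine/internal/remote_api"
)

func main() {
	// Wrap an already-serialized, service-specific request.
	req := &pb.Request{
		ServiceName: proto.String("datastore_v3"),
		Method:      proto.String("Get"),
		Request:     []byte{}, // inner request bytes would go here
	}
	payload, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	_ = payload // in real use this is POSTed to the remote_api endpoint

	// Decode a reply body (here an empty placeholder) and inspect the outcome.
	var body []byte
	res := &pb.Response{}
	if err := proto.Unmarshal(body, res); err != nil {
		log.Fatal(err)
	}
	switch {
	case res.RpcError != nil:
		fmt.Println("rpc error code:", res.GetRpcError().GetCode())
	case res.ApplicationError != nil:
		fmt.Println("application error:", res.GetApplicationError().GetDetail())
	default:
		fmt.Println("inner response bytes:", len(res.GetResponse()))
	}
}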
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 000000000..f21763a4e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go
new file mode 100644
index 000000000..3b280e4a1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go
@@ -0,0 +1,2488 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/search/search.proto
+// DO NOT EDIT!
+
+/*
+Package search is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/search/search.proto
+
+It has these top-level messages:
+ Scope
+ Entry
+ AccessControlList
+ FieldValue
+ Field
+ FieldTypes
+ IndexShardSettings
+ FacetValue
+ Facet
+ DocumentMetadata
+ Document
+ SearchServiceError
+ RequestStatus
+ IndexSpec
+ IndexMetadata
+ IndexDocumentParams
+ IndexDocumentRequest
+ IndexDocumentResponse
+ DeleteDocumentParams
+ DeleteDocumentRequest
+ DeleteDocumentResponse
+ ListDocumentsParams
+ ListDocumentsRequest
+ ListDocumentsResponse
+ ListIndexesParams
+ ListIndexesRequest
+ ListIndexesResponse
+ DeleteSchemaParams
+ DeleteSchemaRequest
+ DeleteSchemaResponse
+ SortSpec
+ ScorerSpec
+ FieldSpec
+ FacetRange
+ FacetRequestParam
+ FacetAutoDetectParam
+ FacetRequest
+ FacetRefinement
+ SearchParams
+ SearchRequest
+ FacetResultValue
+ FacetResult
+ SearchResult
+ SearchResponse
+*/
+package search
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Scope_Type int32
+
+const (
+ Scope_USER_BY_CANONICAL_ID Scope_Type = 1
+ Scope_USER_BY_EMAIL Scope_Type = 2
+ Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3
+ Scope_GROUP_BY_EMAIL Scope_Type = 4
+ Scope_GROUP_BY_DOMAIN Scope_Type = 5
+ Scope_ALL_USERS Scope_Type = 6
+ Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7
+)
+
+var Scope_Type_name = map[int32]string{
+ 1: "USER_BY_CANONICAL_ID",
+ 2: "USER_BY_EMAIL",
+ 3: "GROUP_BY_CANONICAL_ID",
+ 4: "GROUP_BY_EMAIL",
+ 5: "GROUP_BY_DOMAIN",
+ 6: "ALL_USERS",
+ 7: "ALL_AUTHENTICATED_USERS",
+}
+var Scope_Type_value = map[string]int32{
+ "USER_BY_CANONICAL_ID": 1,
+ "USER_BY_EMAIL": 2,
+ "GROUP_BY_CANONICAL_ID": 3,
+ "GROUP_BY_EMAIL": 4,
+ "GROUP_BY_DOMAIN": 5,
+ "ALL_USERS": 6,
+ "ALL_AUTHENTICATED_USERS": 7,
+}
+
+func (x Scope_Type) Enum() *Scope_Type {
+ p := new(Scope_Type)
+ *p = x
+ return p
+}
+func (x Scope_Type) String() string {
+ return proto.EnumName(Scope_Type_name, int32(x))
+}
+func (x *Scope_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type")
+ if err != nil {
+ return err
+ }
+ *x = Scope_Type(value)
+ return nil
+}
+func (Scope_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+
+type Entry_Permission int32
+
+const (
+ Entry_READ Entry_Permission = 1
+ Entry_WRITE Entry_Permission = 2
+ Entry_FULL_CONTROL Entry_Permission = 3
+)
+
+var Entry_Permission_name = map[int32]string{
+ 1: "READ",
+ 2: "WRITE",
+ 3: "FULL_CONTROL",
+}
+var Entry_Permission_value = map[string]int32{
+ "READ": 1,
+ "WRITE": 2,
+ "FULL_CONTROL": 3,
+}
+
+func (x Entry_Permission) Enum() *Entry_Permission {
+ p := new(Entry_Permission)
+ *p = x
+ return p
+}
+func (x Entry_Permission) String() string {
+ return proto.EnumName(Entry_Permission_name, int32(x))
+}
+func (x *Entry_Permission) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission")
+ if err != nil {
+ return err
+ }
+ *x = Entry_Permission(value)
+ return nil
+}
+func (Entry_Permission) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+
+type FieldValue_ContentType int32
+
+const (
+ FieldValue_TEXT FieldValue_ContentType = 0
+ FieldValue_HTML FieldValue_ContentType = 1
+ FieldValue_ATOM FieldValue_ContentType = 2
+ FieldValue_DATE FieldValue_ContentType = 3
+ FieldValue_NUMBER FieldValue_ContentType = 4
+ FieldValue_GEO FieldValue_ContentType = 5
+)
+
+var FieldValue_ContentType_name = map[int32]string{
+ 0: "TEXT",
+ 1: "HTML",
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+ 5: "GEO",
+}
+var FieldValue_ContentType_value = map[string]int32{
+ "TEXT": 0,
+ "HTML": 1,
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+ "GEO": 5,
+}
+
+func (x FieldValue_ContentType) Enum() *FieldValue_ContentType {
+ p := new(FieldValue_ContentType)
+ *p = x
+ return p
+}
+func (x FieldValue_ContentType) String() string {
+ return proto.EnumName(FieldValue_ContentType_name, int32(x))
+}
+func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FieldValue_ContentType(value)
+ return nil
+}
+func (FieldValue_ContentType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+
+type FacetValue_ContentType int32
+
+const (
+ FacetValue_ATOM FacetValue_ContentType = 2
+ FacetValue_NUMBER FacetValue_ContentType = 4
+)
+
+var FacetValue_ContentType_name = map[int32]string{
+ 2: "ATOM",
+ 4: "NUMBER",
+}
+var FacetValue_ContentType_value = map[string]int32{
+ "ATOM": 2,
+ "NUMBER": 4,
+}
+
+func (x FacetValue_ContentType) Enum() *FacetValue_ContentType {
+ p := new(FacetValue_ContentType)
+ *p = x
+ return p
+}
+func (x FacetValue_ContentType) String() string {
+ return proto.EnumName(FacetValue_ContentType_name, int32(x))
+}
+func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FacetValue_ContentType(value)
+ return nil
+}
+func (FacetValue_ContentType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
+
+type Document_OrderIdSource int32
+
+const (
+ Document_DEFAULTED Document_OrderIdSource = 0
+ Document_SUPPLIED Document_OrderIdSource = 1
+)
+
+var Document_OrderIdSource_name = map[int32]string{
+ 0: "DEFAULTED",
+ 1: "SUPPLIED",
+}
+var Document_OrderIdSource_value = map[string]int32{
+ "DEFAULTED": 0,
+ "SUPPLIED": 1,
+}
+
+func (x Document_OrderIdSource) Enum() *Document_OrderIdSource {
+ p := new(Document_OrderIdSource)
+ *p = x
+ return p
+}
+func (x Document_OrderIdSource) String() string {
+ return proto.EnumName(Document_OrderIdSource_name, int32(x))
+}
+func (x *Document_OrderIdSource) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_OrderIdSource_value, data, "Document_OrderIdSource")
+ if err != nil {
+ return err
+ }
+ *x = Document_OrderIdSource(value)
+ return nil
+}
+func (Document_OrderIdSource) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
+
+type Document_Storage int32
+
+const (
+ Document_DISK Document_Storage = 0
+)
+
+var Document_Storage_name = map[int32]string{
+ 0: "DISK",
+}
+var Document_Storage_value = map[string]int32{
+ "DISK": 0,
+}
+
+func (x Document_Storage) Enum() *Document_Storage {
+ p := new(Document_Storage)
+ *p = x
+ return p
+}
+func (x Document_Storage) String() string {
+ return proto.EnumName(Document_Storage_name, int32(x))
+}
+func (x *Document_Storage) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage")
+ if err != nil {
+ return err
+ }
+ *x = Document_Storage(value)
+ return nil
+}
+func (Document_Storage) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 1} }
+
+type SearchServiceError_ErrorCode int32
+
+const (
+ SearchServiceError_OK SearchServiceError_ErrorCode = 0
+ SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1
+ SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2
+ SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3
+ SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4
+ SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5
+ SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6
+)
+
+var SearchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "PERMISSION_DENIED",
+ 5: "TIMEOUT",
+ 6: "CONCURRENT_TRANSACTION",
+}
+var SearchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "PERMISSION_DENIED": 4,
+ "TIMEOUT": 5,
+ "CONCURRENT_TRANSACTION": 6,
+}
+
+func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {
+ p := new(SearchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SearchServiceError_ErrorCode) String() string {
+ return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))
+}
+func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SearchServiceError_ErrorCode(value)
+ return nil
+}
+func (SearchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{11, 0}
+}
+
+type IndexSpec_Consistency int32
+
+const (
+ IndexSpec_GLOBAL IndexSpec_Consistency = 0
+ IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1
+)
+
+var IndexSpec_Consistency_name = map[int32]string{
+ 0: "GLOBAL",
+ 1: "PER_DOCUMENT",
+}
+var IndexSpec_Consistency_value = map[string]int32{
+ "GLOBAL": 0,
+ "PER_DOCUMENT": 1,
+}
+
+func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {
+ p := new(IndexSpec_Consistency)
+ *p = x
+ return p
+}
+func (x IndexSpec_Consistency) String() string {
+ return proto.EnumName(IndexSpec_Consistency_name, int32(x))
+}
+func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Consistency(value)
+ return nil
+}
+func (IndexSpec_Consistency) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
+
+type IndexSpec_Source int32
+
+const (
+ IndexSpec_SEARCH IndexSpec_Source = 0
+ IndexSpec_DATASTORE IndexSpec_Source = 1
+ IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2
+)
+
+var IndexSpec_Source_name = map[int32]string{
+ 0: "SEARCH",
+ 1: "DATASTORE",
+ 2: "CLOUD_STORAGE",
+}
+var IndexSpec_Source_value = map[string]int32{
+ "SEARCH": 0,
+ "DATASTORE": 1,
+ "CLOUD_STORAGE": 2,
+}
+
+func (x IndexSpec_Source) Enum() *IndexSpec_Source {
+ p := new(IndexSpec_Source)
+ *p = x
+ return p
+}
+func (x IndexSpec_Source) String() string {
+ return proto.EnumName(IndexSpec_Source_name, int32(x))
+}
+func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Source(value)
+ return nil
+}
+func (IndexSpec_Source) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 1} }
+
+type IndexSpec_Mode int32
+
+const (
+ IndexSpec_PRIORITY IndexSpec_Mode = 0
+ IndexSpec_BACKGROUND IndexSpec_Mode = 1
+)
+
+var IndexSpec_Mode_name = map[int32]string{
+ 0: "PRIORITY",
+ 1: "BACKGROUND",
+}
+var IndexSpec_Mode_value = map[string]int32{
+ "PRIORITY": 0,
+ "BACKGROUND": 1,
+}
+
+func (x IndexSpec_Mode) Enum() *IndexSpec_Mode {
+ p := new(IndexSpec_Mode)
+ *p = x
+ return p
+}
+func (x IndexSpec_Mode) String() string {
+ return proto.EnumName(IndexSpec_Mode_name, int32(x))
+}
+func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Mode(value)
+ return nil
+}
+func (IndexSpec_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 2} }
+
+type IndexDocumentParams_Freshness int32
+
+const (
+ IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0
+ IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1
+)
+
+var IndexDocumentParams_Freshness_name = map[int32]string{
+ 0: "SYNCHRONOUSLY",
+ 1: "WHEN_CONVENIENT",
+}
+var IndexDocumentParams_Freshness_value = map[string]int32{
+ "SYNCHRONOUSLY": 0,
+ "WHEN_CONVENIENT": 1,
+}
+
+func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {
+ p := new(IndexDocumentParams_Freshness)
+ *p = x
+ return p
+}
+func (x IndexDocumentParams_Freshness) String() string {
+ return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))
+}
+func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness")
+ if err != nil {
+ return err
+ }
+ *x = IndexDocumentParams_Freshness(value)
+ return nil
+}
+func (IndexDocumentParams_Freshness) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{15, 0}
+}
+
+type ScorerSpec_Scorer int32
+
+const (
+ ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0
+ ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2
+)
+
+var ScorerSpec_Scorer_name = map[int32]string{
+ 0: "RESCORING_MATCH_SCORER",
+ 2: "MATCH_SCORER",
+}
+var ScorerSpec_Scorer_value = map[string]int32{
+ "RESCORING_MATCH_SCORER": 0,
+ "MATCH_SCORER": 2,
+}
+
+func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {
+ p := new(ScorerSpec_Scorer)
+ *p = x
+ return p
+}
+func (x ScorerSpec_Scorer) String() string {
+ return proto.EnumName(ScorerSpec_Scorer_name, int32(x))
+}
+func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer")
+ if err != nil {
+ return err
+ }
+ *x = ScorerSpec_Scorer(value)
+ return nil
+}
+func (ScorerSpec_Scorer) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{31, 0} }
+
+type SearchParams_CursorType int32
+
+const (
+ SearchParams_NONE SearchParams_CursorType = 0
+ SearchParams_SINGLE SearchParams_CursorType = 1
+ SearchParams_PER_RESULT SearchParams_CursorType = 2
+)
+
+var SearchParams_CursorType_name = map[int32]string{
+ 0: "NONE",
+ 1: "SINGLE",
+ 2: "PER_RESULT",
+}
+var SearchParams_CursorType_value = map[string]int32{
+ "NONE": 0,
+ "SINGLE": 1,
+ "PER_RESULT": 2,
+}
+
+func (x SearchParams_CursorType) Enum() *SearchParams_CursorType {
+ p := new(SearchParams_CursorType)
+ *p = x
+ return p
+}
+func (x SearchParams_CursorType) String() string {
+ return proto.EnumName(SearchParams_CursorType_name, int32(x))
+}
+func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_CursorType(value)
+ return nil
+}
+func (SearchParams_CursorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} }
+
+type SearchParams_ParsingMode int32
+
+const (
+ SearchParams_STRICT SearchParams_ParsingMode = 0
+ SearchParams_RELAXED SearchParams_ParsingMode = 1
+)
+
+var SearchParams_ParsingMode_name = map[int32]string{
+ 0: "STRICT",
+ 1: "RELAXED",
+}
+var SearchParams_ParsingMode_value = map[string]int32{
+ "STRICT": 0,
+ "RELAXED": 1,
+}
+
+func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {
+ p := new(SearchParams_ParsingMode)
+ *p = x
+ return p
+}
+func (x SearchParams_ParsingMode) String() string {
+ return proto.EnumName(SearchParams_ParsingMode_name, int32(x))
+}
+func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_ParsingMode(value)
+ return nil
+}
+func (SearchParams_ParsingMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 1} }
+
+type Scope struct {
+ Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Scope) Reset() { *m = Scope{} }
+func (m *Scope) String() string { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage() {}
+func (*Scope) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Scope) GetType() Scope_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Scope_USER_BY_CANONICAL_ID
+}
+
+func (m *Scope) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Entry struct {
+ Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
+ DisplayName *string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Entry) GetScope() *Scope {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *Entry) GetPermission() Entry_Permission {
+ if m != nil && m.Permission != nil {
+ return *m.Permission
+ }
+ return Entry_READ
+}
+
+func (m *Entry) GetDisplayName() string {
+ if m != nil && m.DisplayName != nil {
+ return *m.DisplayName
+ }
+ return ""
+}
+
+type AccessControlList struct {
+ Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"`
+ Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AccessControlList) Reset() { *m = AccessControlList{} }
+func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage() {}
+func (*AccessControlList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *AccessControlList) GetOwner() string {
+ if m != nil && m.Owner != nil {
+ return *m.Owner
+ }
+ return ""
+}
+
+func (m *AccessControlList) GetEntries() []*Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type FieldValue struct {
+ Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ Geo *FieldValue_Geo `protobuf:"group,4,opt,name=Geo,json=geo" json:"geo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue) Reset() { *m = FieldValue{} }
+func (m *FieldValue) String() string { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage() {}
+func (*FieldValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
+const Default_FieldValue_Language string = "en"
+
+func (m *FieldValue) GetType() FieldValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FieldValue_Type
+}
+
+func (m *FieldValue) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_FieldValue_Language
+}
+
+func (m *FieldValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *FieldValue) GetGeo() *FieldValue_Geo {
+ if m != nil {
+ return m.Geo
+ }
+ return nil
+}
+
+type FieldValue_Geo struct {
+ Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"`
+ Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage() {}
+func (*FieldValue_Geo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+
+func (m *FieldValue_Geo) GetLat() float64 {
+ if m != nil && m.Lat != nil {
+ return *m.Lat
+ }
+ return 0
+}
+
+func (m *FieldValue_Geo) GetLng() float64 {
+ if m != nil && m.Lng != nil {
+ return *m.Lng
+ }
+ return 0
+}
+
+type Field struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage() {}
+func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *Field) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetValue() *FieldValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type FieldTypes struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldTypes) Reset() { *m = FieldTypes{} }
+func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage() {}
+func (*FieldTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *FieldTypes) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldTypes) GetType() []FieldValue_ContentType {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+type IndexShardSettings struct {
+ PrevNumShards []int32 `protobuf:"varint,1,rep,name=prev_num_shards,json=prevNumShards" json:"prev_num_shards,omitempty"`
+ NumShards *int32 `protobuf:"varint,2,req,name=num_shards,json=numShards,def=1" json:"num_shards,omitempty"`
+ PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false,json=prevNumShardsSearchFalse" json:"prev_num_shards_search_false,omitempty"`
+ LocalReplica *string `protobuf:"bytes,4,opt,name=local_replica,json=localReplica,def=" json:"local_replica,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexShardSettings) Reset() { *m = IndexShardSettings{} }
+func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) }
+func (*IndexShardSettings) ProtoMessage() {}
+func (*IndexShardSettings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+const Default_IndexShardSettings_NumShards int32 = 1
+
+func (m *IndexShardSettings) GetPrevNumShards() []int32 {
+ if m != nil {
+ return m.PrevNumShards
+ }
+ return nil
+}
+
+func (m *IndexShardSettings) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return Default_IndexShardSettings_NumShards
+}
+
+func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 {
+ if m != nil {
+ return m.PrevNumShardsSearchFalse
+ }
+ return nil
+}
+
+func (m *IndexShardSettings) GetLocalReplica() string {
+ if m != nil && m.LocalReplica != nil {
+ return *m.LocalReplica
+ }
+ return ""
+}
+
+type FacetValue struct {
+ Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetValue) Reset() { *m = FacetValue{} }
+func (m *FacetValue) String() string { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage() {}
+func (*FacetValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
+
+func (m *FacetValue) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FacetValue_Type
+}
+
+func (m *FacetValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+type Facet struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Facet) Reset() { *m = Facet{} }
+func (m *Facet) String() string { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage() {}
+func (*Facet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *Facet) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Facet) GetValue() *FacetValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type DocumentMetadata struct {
+ Version *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+ CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version,json=committedStVersion" json:"committed_st_version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DocumentMetadata) Reset() { *m = DocumentMetadata{} }
+func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) }
+func (*DocumentMetadata) ProtoMessage() {}
+func (*DocumentMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *DocumentMetadata) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *DocumentMetadata) GetCommittedStVersion() int64 {
+ if m != nil && m.CommittedStVersion != nil {
+ return *m.CommittedStVersion
+ }
+ return 0
+}
+
+type Document struct {
+ Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+ OrderId *int32 `protobuf:"varint,4,opt,name=order_id,json=orderId" json:"order_id,omitempty"`
+ OrderIdSource *Document_OrderIdSource `protobuf:"varint,6,opt,name=order_id_source,json=orderIdSource,enum=search.Document_OrderIdSource,def=1" json:"order_id_source,omitempty"`
+ Storage *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+ Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Document) Reset() { *m = Document{} }
+func (m *Document) String() string { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage() {}
+func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+const Default_Document_Language string = "en"
+const Default_Document_OrderIdSource Document_OrderIdSource = Document_SUPPLIED
+const Default_Document_Storage Document_Storage = Document_DISK
+
+func (m *Document) GetId() string {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return ""
+}
+
+func (m *Document) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_Document_Language
+}
+
+func (m *Document) GetField() []*Field {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *Document) GetOrderId() int32 {
+ if m != nil && m.OrderId != nil {
+ return *m.OrderId
+ }
+ return 0
+}
+
+func (m *Document) GetOrderIdSource() Document_OrderIdSource {
+ if m != nil && m.OrderIdSource != nil {
+ return *m.OrderIdSource
+ }
+ return Default_Document_OrderIdSource
+}
+
+func (m *Document) GetStorage() Document_Storage {
+ if m != nil && m.Storage != nil {
+ return *m.Storage
+ }
+ return Default_Document_Storage
+}
+
+func (m *Document) GetFacet() []*Facet {
+ if m != nil {
+ return m.Facet
+ }
+ return nil
+}
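
The Document, Field, and FieldValue messages above are the building blocks the search service operates on; since this is proto2-generated code, scalar fields are pointers and are normally set through the proto helper functions and the generated Enum() methods. A hypothetical snippet showing a document with one text field, assuming the generated package is importable as pb:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "google.golang.org/appengine/internal/search"
)

func main() {
	doc := &pb.Document{
		Id: proto.String("doc-1"),
		Field: []*pb.Field{{
			Name: proto.String("title"),
			Value: &pb.FieldValue{
				Type:        pb.FieldValue_TEXT.Enum(),
				StringValue: proto.String("hello world"),
			},
		}},
	}
	fmt.Println(doc.GetField()[0].GetValue().GetStringValue()) // "hello world"
	fmt.Println(doc.GetLanguage())                             // unset, so the getter returns the default "en"
}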
+
+type SearchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchServiceError) Reset() { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage() {}
+func (*SearchServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+type RequestStatus struct {
+ Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"`
+ CanonicalCode *int32 `protobuf:"varint,3,opt,name=canonical_code,json=canonicalCode" json:"canonical_code,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestStatus) Reset() { *m = RequestStatus{} }
+func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage() {}
+func (*RequestStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return SearchServiceError_OK
+}
+
+func (m *RequestStatus) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+func (m *RequestStatus) GetCanonicalCode() int32 {
+ if m != nil && m.CanonicalCode != nil {
+ return *m.CanonicalCode
+ }
+ return 0
+}
+
+type IndexSpec struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexSpec) Reset() { *m = IndexSpec{} }
+func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage() {}
+func (*IndexSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
+const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
+const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY
+
+func (m *IndexSpec) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetConsistency() IndexSpec_Consistency {
+ if m != nil && m.Consistency != nil {
+ return *m.Consistency
+ }
+ return Default_IndexSpec_Consistency
+}
+
+func (m *IndexSpec) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetVersion() int32 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *IndexSpec) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_IndexSpec_Source
+}
+
+func (m *IndexSpec) GetMode() IndexSpec_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_IndexSpec_Mode
+}
+
+type IndexMetadata struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata) Reset() { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage() {}
+func (*IndexMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetField() []*FieldTypes {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {
+ if m != nil {
+ return m.Storage
+ }
+ return nil
+}
+
+type IndexMetadata_Storage struct {
+ AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used,json=amountUsed" json:"amount_used,omitempty"`
+ Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage() {}
+func (*IndexMetadata_Storage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14, 0} }
+
+func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
+ if m != nil && m.AmountUsed != nil {
+ return *m.AmountUsed
+ }
+ return 0
+}
+
+func (m *IndexMetadata_Storage) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type IndexDocumentParams struct {
+ Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
+ Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage() {}
+func (*IndexDocumentParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
+
+func (m *IndexDocumentParams) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {
+ if m != nil && m.Freshness != nil {
+ return *m.Freshness
+ }
+ return Default_IndexDocumentParams_Freshness
+}
+
+func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type IndexDocumentRequest struct {
+ Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage() {}
+func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *IndexDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type IndexDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ DocId []string `protobuf:"bytes,2,rep,name=doc_id,json=docId" json:"doc_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage() {}
+func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *IndexDocumentResponse) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+type DeleteDocumentParams struct {
+ DocId []string `protobuf:"bytes,1,rep,name=doc_id,json=docId" json:"doc_id,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage() {}
+func (*DeleteDocumentParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *DeleteDocumentParams) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteDocumentRequest struct {
+ Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage() {}
+func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage() {}
+func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type ListDocumentsParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id,json=startDocId" json:"start_doc_id,omitempty"`
+ IncludeStartDoc *bool `protobuf:"varint,3,opt,name=include_start_doc,json=includeStartDoc,def=1" json:"include_start_doc,omitempty"`
+ Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage() {}
+func (*ListDocumentsParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+const Default_ListDocumentsParams_IncludeStartDoc bool = true
+const Default_ListDocumentsParams_Limit int32 = 100
+
+func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *ListDocumentsParams) GetStartDocId() string {
+ if m != nil && m.StartDocId != nil {
+ return *m.StartDocId
+ }
+ return ""
+}
+
+func (m *ListDocumentsParams) GetIncludeStartDoc() bool {
+ if m != nil && m.IncludeStartDoc != nil {
+ return *m.IncludeStartDoc
+ }
+ return Default_ListDocumentsParams_IncludeStartDoc
+}
+
+func (m *ListDocumentsParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListDocumentsParams_Limit
+}
+
+func (m *ListDocumentsParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+type ListDocumentsRequest struct {
+ Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,2,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage() {}
+func (*ListDocumentsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListDocumentsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListDocumentsResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage() {}
+func (*ListDocumentsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListDocumentsResponse) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+type ListIndexesParams struct {
+ FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema,json=fetchSchema" json:"fetch_schema,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name,json=startIndexName" json:"start_index_name,omitempty"`
+ IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,json=includeStartIndex,def=1" json:"include_start_index,omitempty"`
+ IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix,json=indexNamePrefix" json:"index_name_prefix,omitempty"`
+ Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage() {}
+func (*ListIndexesParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+const Default_ListIndexesParams_Limit int32 = 20
+const Default_ListIndexesParams_IncludeStartIndex bool = true
+const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *ListIndexesParams) GetFetchSchema() bool {
+ if m != nil && m.FetchSchema != nil {
+ return *m.FetchSchema
+ }
+ return false
+}
+
+func (m *ListIndexesParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListIndexesParams_Limit
+}
+
+func (m *ListIndexesParams) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetStartIndexName() string {
+ if m != nil && m.StartIndexName != nil {
+ return *m.StartIndexName
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetIncludeStartIndex() bool {
+ if m != nil && m.IncludeStartIndex != nil {
+ return *m.IncludeStartIndex
+ }
+ return Default_ListIndexesParams_IncludeStartIndex
+}
+
+func (m *ListIndexesParams) GetIndexNamePrefix() string {
+ if m != nil && m.IndexNamePrefix != nil {
+ return *m.IndexNamePrefix
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *ListIndexesParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_ListIndexesParams_Source
+}
+
+type ListIndexesRequest struct {
+ Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage() {}
+func (*ListIndexesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListIndexesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListIndexesResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata,json=indexMetadata" json:"index_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage() {}
+func (*ListIndexesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *ListIndexesResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {
+ if m != nil {
+ return m.IndexMetadata
+ }
+ return nil
+}
+
+type DeleteSchemaParams struct {
+ Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage() {}
+func (*DeleteSchemaParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *DeleteSchemaParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_DeleteSchemaParams_Source
+}
+
+func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteSchemaRequest struct {
+ Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage() {}
+func (*DeleteSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteSchemaRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteSchemaResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage() {}
+func (*DeleteSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type SortSpec struct {
+ SortExpression *string `protobuf:"bytes,1,req,name=sort_expression,json=sortExpression" json:"sort_expression,omitempty"`
+ SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,json=sortDescending,def=1" json:"sort_descending,omitempty"`
+ DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text,json=defaultValueText" json:"default_value_text,omitempty"`
+ DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric,json=defaultValueNumeric" json:"default_value_numeric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SortSpec) Reset() { *m = SortSpec{} }
+func (m *SortSpec) String() string { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage() {}
+func (*SortSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+const Default_SortSpec_SortDescending bool = true
+
+func (m *SortSpec) GetSortExpression() string {
+ if m != nil && m.SortExpression != nil {
+ return *m.SortExpression
+ }
+ return ""
+}
+
+func (m *SortSpec) GetSortDescending() bool {
+ if m != nil && m.SortDescending != nil {
+ return *m.SortDescending
+ }
+ return Default_SortSpec_SortDescending
+}
+
+func (m *SortSpec) GetDefaultValueText() string {
+ if m != nil && m.DefaultValueText != nil {
+ return *m.DefaultValueText
+ }
+ return ""
+}
+
+func (m *SortSpec) GetDefaultValueNumeric() float64 {
+ if m != nil && m.DefaultValueNumeric != nil {
+ return *m.DefaultValueNumeric
+ }
+ return 0
+}
+
+type ScorerSpec struct {
+ Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
+ MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters,json=matchScorerParameters" json:"match_scorer_parameters,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ScorerSpec) Reset() { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage() {}
+func (*ScorerSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+
+const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
+const Default_ScorerSpec_Limit int32 = 1000
+
+func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {
+ if m != nil && m.Scorer != nil {
+ return *m.Scorer
+ }
+ return Default_ScorerSpec_Scorer
+}
+
+func (m *ScorerSpec) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ScorerSpec_Limit
+}
+
+func (m *ScorerSpec) GetMatchScorerParameters() string {
+ if m != nil && m.MatchScorerParameters != nil {
+ return *m.MatchScorerParameters
+ }
+ return ""
+}
+
+type FieldSpec struct {
+ Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
+ Expression []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression,json=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec) Reset() { *m = FieldSpec{} }
+func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage() {}
+func (*FieldSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+
+func (m *FieldSpec) GetName() []string {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *FieldSpec) GetExpression() []*FieldSpec_Expression {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+type FieldSpec_Expression struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage() {}
+func (*FieldSpec_Expression) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32, 0} }
+
+func (m *FieldSpec_Expression) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldSpec_Expression) GetExpression() string {
+ if m != nil && m.Expression != nil {
+ return *m.Expression
+ }
+ return ""
+}
+
+type FacetRange struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRange) Reset() { *m = FacetRange{} }
+func (m *FacetRange) String() string { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage() {}
+func (*FacetRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func (m *FacetRange) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRange) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRange) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type FacetRequestParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,json=valueLimit" json:"value_limit,omitempty"`
+ Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
+ ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint,json=valueConstraint" json:"value_constraint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage() {}
+func (*FacetRequestParam) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+
+func (m *FacetRequestParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return 0
+}
+
+func (m *FacetRequestParam) GetRange() []*FacetRange {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+func (m *FacetRequestParam) GetValueConstraint() []string {
+ if m != nil {
+ return m.ValueConstraint
+ }
+ return nil
+}
+
+type FacetAutoDetectParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,json=valueLimit,def=10" json:"value_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage() {}
+func (*FacetAutoDetectParam) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+
+const Default_FacetAutoDetectParam_ValueLimit int32 = 10
+
+func (m *FacetAutoDetectParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return Default_FacetAutoDetectParam_ValueLimit
+}
+
+type FacetRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Params *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequest) Reset() { *m = FacetRequest{} }
+func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage() {}
+func (*FacetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+
+func (m *FacetRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRequest) GetParams() *FacetRequestParam {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+type FacetRefinement struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Range *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefinement) Reset() { *m = FacetRefinement{} }
+func (m *FacetRefinement) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement) ProtoMessage() {}
+func (*FacetRefinement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+
+func (m *FacetRefinement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRefinement) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func (m *FacetRefinement) GetRange() *FacetRefinement_Range {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+type FacetRefinement_Range struct {
+ Start *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefinement_Range) Reset() { *m = FacetRefinement_Range{} }
+func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement_Range) ProtoMessage() {}
+func (*FacetRefinement_Range) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37, 0} }
+
+func (m *FacetRefinement_Range) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRefinement_Range) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type SearchParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
+ CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,json=cursorType,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+ Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
+ MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy,json=matchedCountAccuracy" json:"matched_count_accuracy,omitempty"`
+ SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec,json=sortSpec" json:"sort_spec,omitempty"`
+ ScorerSpec *ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec,json=scorerSpec" json:"scorer_spec,omitempty"`
+ FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec,json=fieldSpec" json:"field_spec,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,json=parsingMode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+ AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,json=autoDiscoverFacetCount,def=0" json:"auto_discover_facet_count,omitempty"`
+ IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet,json=includeFacet" json:"include_facet,omitempty"`
+ FacetRefinement []*FacetRefinement `protobuf:"bytes,17,rep,name=facet_refinement,json=facetRefinement" json:"facet_refinement,omitempty"`
+ FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param,json=facetAutoDetectParam" json:"facet_auto_detect_param,omitempty"`
+ FacetDepth *int32 `protobuf:"varint,19,opt,name=facet_depth,json=facetDepth,def=1000" json:"facet_depth,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchParams) Reset() { *m = SearchParams{} }
+func (m *SearchParams) String() string { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage() {}
+func (*SearchParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+
+const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
+const Default_SearchParams_Limit int32 = 20
+const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT
+const Default_SearchParams_AutoDiscoverFacetCount int32 = 0
+const Default_SearchParams_FacetDepth int32 = 1000
+
+func (m *SearchParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetQuery() string {
+ if m != nil && m.Query != nil {
+ return *m.Query
+ }
+ return ""
+}
+
+func (m *SearchParams) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *SearchParams) GetCursorType() SearchParams_CursorType {
+ if m != nil && m.CursorType != nil {
+ return *m.CursorType
+ }
+ return Default_SearchParams_CursorType
+}
+
+func (m *SearchParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_SearchParams_Limit
+}
+
+func (m *SearchParams) GetMatchedCountAccuracy() int32 {
+ if m != nil && m.MatchedCountAccuracy != nil {
+ return *m.MatchedCountAccuracy
+ }
+ return 0
+}
+
+func (m *SearchParams) GetSortSpec() []*SortSpec {
+ if m != nil {
+ return m.SortSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetScorerSpec() *ScorerSpec {
+ if m != nil {
+ return m.ScorerSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFieldSpec() *FieldSpec {
+ if m != nil {
+ return m.FieldSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {
+ if m != nil && m.ParsingMode != nil {
+ return *m.ParsingMode
+ }
+ return Default_SearchParams_ParsingMode
+}
+
+func (m *SearchParams) GetAutoDiscoverFacetCount() int32 {
+ if m != nil && m.AutoDiscoverFacetCount != nil {
+ return *m.AutoDiscoverFacetCount
+ }
+ return Default_SearchParams_AutoDiscoverFacetCount
+}
+
+func (m *SearchParams) GetIncludeFacet() []*FacetRequest {
+ if m != nil {
+ return m.IncludeFacet
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetRefinement() []*FacetRefinement {
+ if m != nil {
+ return m.FacetRefinement
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {
+ if m != nil {
+ return m.FacetAutoDetectParam
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetDepth() int32 {
+ if m != nil && m.FacetDepth != nil {
+ return *m.FacetDepth
+ }
+ return Default_SearchParams_FacetDepth
+}
+
+type SearchRequest struct {
+ Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchRequest) Reset() { *m = SearchRequest{} }
+func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage() {}
+func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
+
+func (m *SearchRequest) GetParams() *SearchParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *SearchRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type FacetResultValue struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"`
+ Refinement *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResultValue) Reset() { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage() {}
+func (*FacetResultValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
+
+func (m *FacetResultValue) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResultValue) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *FacetResultValue) GetRefinement() *FacetRefinement {
+ if m != nil {
+ return m.Refinement
+ }
+ return nil
+}
+
+type FacetResult struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResult) Reset() { *m = FacetResult{} }
+func (m *FacetResult) String() string { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage() {}
+func (*FacetResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
+
+func (m *FacetResult) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResult) GetValue() []*FacetResultValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SearchResult struct {
+ Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"`
+ Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"`
+ Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"`
+ Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResult) Reset() { *m = SearchResult{} }
+func (m *SearchResult) String() string { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage() {}
+func (*SearchResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
+
+func (m *SearchResult) GetDocument() *Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *SearchResult) GetExpression() []*Field {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+func (m *SearchResult) GetScore() []float64 {
+ if m != nil {
+ return m.Score
+ }
+ return nil
+}
+
+func (m *SearchResult) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+type SearchResponse struct {
+ Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+ MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count,json=matchedCount" json:"matched_count,omitempty"`
+ Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result,json=facetResult" json:"facet_result,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResponse) Reset() { *m = SearchResponse{} }
+func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage() {}
+func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
+
+var extRange_SearchResponse = []proto.ExtensionRange{
+ {1000, 9999},
+}
+
+func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_SearchResponse
+}
+
+func (m *SearchResponse) GetResult() []*SearchResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetMatchedCount() int64 {
+ if m != nil && m.MatchedCount != nil {
+ return *m.MatchedCount
+ }
+ return 0
+}
+
+func (m *SearchResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchResponse) GetFacetResult() []*FacetResult {
+ if m != nil {
+ return m.FacetResult
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Scope)(nil), "search.Scope")
+ proto.RegisterType((*Entry)(nil), "search.Entry")
+ proto.RegisterType((*AccessControlList)(nil), "search.AccessControlList")
+ proto.RegisterType((*FieldValue)(nil), "search.FieldValue")
+ proto.RegisterType((*FieldValue_Geo)(nil), "search.FieldValue.Geo")
+ proto.RegisterType((*Field)(nil), "search.Field")
+ proto.RegisterType((*FieldTypes)(nil), "search.FieldTypes")
+ proto.RegisterType((*IndexShardSettings)(nil), "search.IndexShardSettings")
+ proto.RegisterType((*FacetValue)(nil), "search.FacetValue")
+ proto.RegisterType((*Facet)(nil), "search.Facet")
+ proto.RegisterType((*DocumentMetadata)(nil), "search.DocumentMetadata")
+ proto.RegisterType((*Document)(nil), "search.Document")
+ proto.RegisterType((*SearchServiceError)(nil), "search.SearchServiceError")
+ proto.RegisterType((*RequestStatus)(nil), "search.RequestStatus")
+ proto.RegisterType((*IndexSpec)(nil), "search.IndexSpec")
+ proto.RegisterType((*IndexMetadata)(nil), "search.IndexMetadata")
+ proto.RegisterType((*IndexMetadata_Storage)(nil), "search.IndexMetadata.Storage")
+ proto.RegisterType((*IndexDocumentParams)(nil), "search.IndexDocumentParams")
+ proto.RegisterType((*IndexDocumentRequest)(nil), "search.IndexDocumentRequest")
+ proto.RegisterType((*IndexDocumentResponse)(nil), "search.IndexDocumentResponse")
+ proto.RegisterType((*DeleteDocumentParams)(nil), "search.DeleteDocumentParams")
+ proto.RegisterType((*DeleteDocumentRequest)(nil), "search.DeleteDocumentRequest")
+ proto.RegisterType((*DeleteDocumentResponse)(nil), "search.DeleteDocumentResponse")
+ proto.RegisterType((*ListDocumentsParams)(nil), "search.ListDocumentsParams")
+ proto.RegisterType((*ListDocumentsRequest)(nil), "search.ListDocumentsRequest")
+ proto.RegisterType((*ListDocumentsResponse)(nil), "search.ListDocumentsResponse")
+ proto.RegisterType((*ListIndexesParams)(nil), "search.ListIndexesParams")
+ proto.RegisterType((*ListIndexesRequest)(nil), "search.ListIndexesRequest")
+ proto.RegisterType((*ListIndexesResponse)(nil), "search.ListIndexesResponse")
+ proto.RegisterType((*DeleteSchemaParams)(nil), "search.DeleteSchemaParams")
+ proto.RegisterType((*DeleteSchemaRequest)(nil), "search.DeleteSchemaRequest")
+ proto.RegisterType((*DeleteSchemaResponse)(nil), "search.DeleteSchemaResponse")
+ proto.RegisterType((*SortSpec)(nil), "search.SortSpec")
+ proto.RegisterType((*ScorerSpec)(nil), "search.ScorerSpec")
+ proto.RegisterType((*FieldSpec)(nil), "search.FieldSpec")
+ proto.RegisterType((*FieldSpec_Expression)(nil), "search.FieldSpec.Expression")
+ proto.RegisterType((*FacetRange)(nil), "search.FacetRange")
+ proto.RegisterType((*FacetRequestParam)(nil), "search.FacetRequestParam")
+ proto.RegisterType((*FacetAutoDetectParam)(nil), "search.FacetAutoDetectParam")
+ proto.RegisterType((*FacetRequest)(nil), "search.FacetRequest")
+ proto.RegisterType((*FacetRefinement)(nil), "search.FacetRefinement")
+ proto.RegisterType((*FacetRefinement_Range)(nil), "search.FacetRefinement.Range")
+ proto.RegisterType((*SearchParams)(nil), "search.SearchParams")
+ proto.RegisterType((*SearchRequest)(nil), "search.SearchRequest")
+ proto.RegisterType((*FacetResultValue)(nil), "search.FacetResultValue")
+ proto.RegisterType((*FacetResult)(nil), "search.FacetResult")
+ proto.RegisterType((*SearchResult)(nil), "search.SearchResult")
+ proto.RegisterType((*SearchResponse)(nil), "search.SearchResponse")
+ proto.RegisterEnum("search.Scope_Type", Scope_Type_name, Scope_Type_value)
+ proto.RegisterEnum("search.Entry_Permission", Entry_Permission_name, Entry_Permission_value)
+ proto.RegisterEnum("search.FieldValue_ContentType", FieldValue_ContentType_name, FieldValue_ContentType_value)
+ proto.RegisterEnum("search.FacetValue_ContentType", FacetValue_ContentType_name, FacetValue_ContentType_value)
+ proto.RegisterEnum("search.Document_OrderIdSource", Document_OrderIdSource_name, Document_OrderIdSource_value)
+ proto.RegisterEnum("search.Document_Storage", Document_Storage_name, Document_Storage_value)
+ proto.RegisterEnum("search.SearchServiceError_ErrorCode", SearchServiceError_ErrorCode_name, SearchServiceError_ErrorCode_value)
+ proto.RegisterEnum("search.IndexSpec_Consistency", IndexSpec_Consistency_name, IndexSpec_Consistency_value)
+ proto.RegisterEnum("search.IndexSpec_Source", IndexSpec_Source_name, IndexSpec_Source_value)
+ proto.RegisterEnum("search.IndexSpec_Mode", IndexSpec_Mode_name, IndexSpec_Mode_value)
+ proto.RegisterEnum("search.IndexDocumentParams_Freshness", IndexDocumentParams_Freshness_name, IndexDocumentParams_Freshness_value)
+ proto.RegisterEnum("search.ScorerSpec_Scorer", ScorerSpec_Scorer_name, ScorerSpec_Scorer_value)
+ proto.RegisterEnum("search.SearchParams_CursorType", SearchParams_CursorType_name, SearchParams_CursorType_value)
+ proto.RegisterEnum("search.SearchParams_ParsingMode", SearchParams_ParsingMode_name, SearchParams_ParsingMode_value)
+}
+
+func init() { proto.RegisterFile("search.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 2960 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x59, 0xcf, 0x73, 0xdb, 0xc6,
+ 0xf5, 0x17, 0x40, 0x91, 0x22, 0x1f, 0x49, 0x09, 0x5a, 0xfd, 0x30, 0xed, 0xf8, 0x9b, 0x28, 0x70,
+ 0x9c, 0x28, 0xf9, 0xda, 0xfa, 0xca, 0xb2, 0x27, 0xf1, 0x57, 0xcd, 0xb4, 0xa1, 0x49, 0x58, 0x66,
+ 0x4d, 0x91, 0xca, 0x12, 0x74, 0xe2, 0xce, 0x34, 0x28, 0x0a, 0xac, 0x64, 0x4c, 0x49, 0x80, 0x01,
+ 0x40, 0xd7, 0xba, 0x75, 0x72, 0xeb, 0xa5, 0xd3, 0x4e, 0x4f, 0x3d, 0x75, 0x32, 0xbd, 0x74, 0x7a,
+ 0xed, 0xbd, 0xa7, 0xf6, 0xd6, 0x5b, 0x4f, 0xfd, 0x07, 0x3a, 0x9d, 0x76, 0xa6, 0x7f, 0x43, 0x67,
+ 0xdf, 0x2e, 0x40, 0x80, 0xa2, 0xa3, 0xd8, 0x37, 0xe2, 0xed, 0xdb, 0xb7, 0x6f, 0xdf, 0xe7, 0xbd,
+ 0xcf, 0xbe, 0x5d, 0x42, 0x2d, 0x62, 0x76, 0xe8, 0x3c, 0xdb, 0x9b, 0x84, 0x41, 0x1c, 0x90, 0x92,
+ 0xf8, 0xd2, 0xff, 0xad, 0x40, 0x71, 0xe0, 0x04, 0x13, 0x46, 0xde, 0x85, 0xe5, 0xf8, 0x7c, 0xc2,
+ 0x1a, 0xca, 0x8e, 0xb2, 0xbb, 0x7a, 0x40, 0xf6, 0xa4, 0x3a, 0x0e, 0xee, 0x99, 0xe7, 0x13, 0x46,
+ 0x71, 0x9c, 0x6c, 0x42, 0xf1, 0xb9, 0x3d, 0x9a, 0xb2, 0x86, 0xba, 0xa3, 0xec, 0x56, 0xa8, 0xf8,
+ 0xd0, 0x7f, 0xa7, 0xc0, 0x32, 0x57, 0x22, 0x0d, 0xd8, 0x1c, 0x0e, 0x0c, 0x6a, 0x3d, 0x78, 0x6a,
+ 0xb5, 0x9a, 0xbd, 0x7e, 0xaf, 0xd3, 0x6a, 0x76, 0xad, 0x4e, 0x5b, 0x53, 0xc8, 0x3a, 0xd4, 0x93,
+ 0x11, 0xe3, 0xb8, 0xd9, 0xe9, 0x6a, 0x2a, 0xb9, 0x0a, 0x5b, 0x47, 0xb4, 0x3f, 0x3c, 0xb9, 0xa0,
+ 0x5d, 0x20, 0x04, 0x56, 0xd3, 0x21, 0xa1, 0xbe, 0x4c, 0x36, 0x60, 0x2d, 0x95, 0xb5, 0xfb, 0xc7,
+ 0xcd, 0x4e, 0x4f, 0x2b, 0x92, 0x3a, 0x54, 0x9a, 0xdd, 0xae, 0xc5, 0x4d, 0x0f, 0xb4, 0x12, 0x79,
+ 0x03, 0xae, 0xf0, 0xcf, 0xe6, 0xd0, 0x7c, 0x64, 0xf4, 0xcc, 0x4e, 0xab, 0x69, 0x1a, 0x6d, 0x39,
+ 0xb8, 0xa2, 0xff, 0x49, 0x81, 0xa2, 0xe1, 0xc7, 0xe1, 0x39, 0xb9, 0x01, 0xc5, 0x88, 0xef, 0x0c,
+ 0xb7, 0x5b, 0x3d, 0xa8, 0xe7, 0xb6, 0x4b, 0xc5, 0x18, 0xb9, 0x0f, 0x30, 0x61, 0xe1, 0xd8, 0x8b,
+ 0x22, 0x2f, 0xf0, 0x71, 0xbf, 0xab, 0x07, 0x8d, 0x44, 0x13, 0xed, 0xec, 0x9d, 0xa4, 0xe3, 0x34,
+ 0xa3, 0x4b, 0xde, 0x86, 0x9a, 0xeb, 0x45, 0x93, 0x91, 0x7d, 0x6e, 0xf9, 0xf6, 0x98, 0x35, 0x0a,
+ 0x18, 0xab, 0xaa, 0x94, 0xf5, 0xec, 0x31, 0xd3, 0xef, 0x02, 0xcc, 0x26, 0x93, 0x32, 0x2c, 0x53,
+ 0xa3, 0xc9, 0xc3, 0x54, 0x81, 0xe2, 0x67, 0xb4, 0x63, 0x1a, 0x9a, 0x4a, 0x34, 0xa8, 0x3d, 0x1c,
+ 0x76, 0xbb, 0x56, 0xab, 0xdf, 0x33, 0x69, 0xbf, 0xab, 0x15, 0x74, 0x0a, 0xeb, 0x4d, 0xc7, 0x61,
+ 0x51, 0xd4, 0x0a, 0xfc, 0x38, 0x0c, 0x46, 0x5d, 0x2f, 0x8a, 0x39, 0x22, 0xc1, 0x4f, 0x7d, 0x16,
+ 0xe2, 0x5e, 0x2a, 0x54, 0x7c, 0x90, 0xf7, 0x60, 0x85, 0xf9, 0x71, 0xe8, 0xb1, 0xa8, 0xa1, 0xee,
+ 0x14, 0xb2, 0x7b, 0x44, 0xcf, 0x69, 0x32, 0xaa, 0xff, 0x41, 0x05, 0x78, 0xe8, 0xb1, 0x91, 0xfb,
+ 0x84, 0x23, 0x49, 0xee, 0xe7, 0xf2, 0xe0, 0xcd, 0x64, 0xd2, 0x4c, 0x63, 0x8f, 0xaf, 0xcd, 0xfc,
+ 0x98, 0xc3, 0x7d, 0xb8, 0x6c, 0x1a, 0x9f, 0x9b, 0x32, 0x33, 0xde, 0x84, 0xf2, 0xc8, 0xf6, 0xcf,
+ 0xa6, 0xf6, 0x99, 0x4c, 0x8e, 0x43, 0x95, 0xf9, 0x34, 0x95, 0xf1, 0xa0, 0x44, 0x71, 0xe8, 0xf9,
+ 0x67, 0x96, 0x48, 0x20, 0x19, 0x14, 0x21, 0x13, 0x8b, 0xef, 0x42, 0xe1, 0x8c, 0x05, 0x8d, 0xe5,
+ 0x1d, 0x65, 0x17, 0x0e, 0xb6, 0x17, 0xac, 0x7d, 0xc4, 0x02, 0xca, 0x55, 0xae, 0xbd, 0x0f, 0x85,
+ 0x23, 0x16, 0x10, 0x0d, 0x0a, 0x23, 0x3b, 0x6e, 0x14, 0x77, 0xd4, 0x5d, 0x85, 0xf2, 0x9f, 0x28,
+ 0xf1, 0xcf, 0x1a, 0x25, 0x29, 0xf1, 0xcf, 0xf4, 0xef, 0x43, 0x35, 0xe3, 0x32, 0x0f, 0x35, 0x77,
+ 0x5a, 0x5b, 0xe2, 0xbf, 0x1e, 0x99, 0xc7, 0x5d, 0x4d, 0xe1, 0xbf, 0x9a, 0x66, 0xff, 0x58, 0x53,
+ 0xf9, 0xaf, 0x76, 0xd3, 0x34, 0xb4, 0x02, 0x01, 0x28, 0xf5, 0x86, 0xc7, 0x0f, 0x0c, 0xaa, 0x2d,
+ 0x93, 0x15, 0x28, 0x1c, 0x19, 0x7d, 0xad, 0xa8, 0x1b, 0x50, 0x44, 0x6f, 0x08, 0x81, 0x65, 0x44,
+ 0x56, 0xd9, 0x51, 0x77, 0x2b, 0x14, 0x7f, 0x93, 0xdd, 0x59, 0x69, 0xa8, 0xbb, 0xd5, 0x59, 0x0d,
+ 0xcd, 0xfc, 0x4f, 0xca, 0xc5, 0x94, 0x21, 0xe7, 0x0e, 0x45, 0x0b, 0x6d, 0x1d, 0x48, 0x18, 0x38,
+ 0x76, 0x97, 0xc2, 0x20, 0x00, 0xd0, 0xff, 0xa2, 0x00, 0xe9, 0xf8, 0x2e, 0x7b, 0x31, 0x78, 0x66,
+ 0x87, 0xee, 0x80, 0xc5, 0xb1, 0xe7, 0x9f, 0x45, 0xe4, 0x5d, 0x58, 0x9b, 0x84, 0xec, 0xb9, 0xe5,
+ 0x4f, 0xc7, 0x56, 0xc4, 0x47, 0xa2, 0x86, 0xb2, 0x53, 0xd8, 0x2d, 0xd2, 0x3a, 0x17, 0xf7, 0xa6,
+ 0x63, 0x54, 0x8f, 0xc8, 0x0e, 0x40, 0x46, 0x85, 0xef, 0xa1, 0x78, 0xa8, 0xdc, 0xa1, 0x15, 0x3f,
+ 0xd5, 0xf8, 0x2e, 0x5c, 0x9f, 0xb3, 0x64, 0x09, 0xbf, 0xac, 0x53, 0x7b, 0x14, 0x71, 0x44, 0xb9,
+ 0xd9, 0x46, 0xce, 0xec, 0x00, 0x15, 0x1e, 0xf2, 0x71, 0x72, 0x13, 0xea, 0xa3, 0xc0, 0xb1, 0x47,
+ 0x56, 0xc8, 0x26, 0x23, 0xcf, 0xb1, 0x11, 0xe8, 0xca, 0xe1, 0x12, 0xad, 0xa1, 0x98, 0x0a, 0xa9,
+ 0xfe, 0x0b, 0x05, 0xe0, 0xa1, 0xed, 0xb0, 0xf8, 0x9b, 0x33, 0x32, 0xd5, 0xc8, 0x67, 0x24, 0x07,
+ 0x52, 0x66, 0xe4, 0xe5, 0x19, 0xa7, 0xdf, 0xb8, 0x90, 0x1c, 0x32, 0x11, 0x32, 0xf0, 0x23, 0xea,
+ 0x7c, 0xb5, 0x57, 0x43, 0x3d, 0xf5, 0x2f, 0x41, 0xfd, 0x0b, 0xd0, 0xda, 0x81, 0x33, 0x1d, 0x33,
+ 0x3f, 0x3e, 0x66, 0xb1, 0xed, 0xda, 0xb1, 0x4d, 0x1a, 0xb0, 0xf2, 0x9c, 0x85, 0x48, 0x30, 0x7c,
+ 0x7f, 0x05, 0x9a, 0x7c, 0x92, 0x7d, 0xd8, 0x74, 0x82, 0xf1, 0xd8, 0x8b, 0x63, 0xe6, 0x5a, 0x51,
+ 0x6c, 0x25, 0x6a, 0x2a, 0xaa, 0x91, 0x74, 0x6c, 0x10, 0x3f, 0x11, 0x23, 0xfa, 0x7f, 0x54, 0x28,
+ 0x27, 0x0b, 0x90, 0x55, 0x50, 0x3d, 0x57, 0x52, 0x82, 0xea, 0xb9, 0x97, 0x56, 0xe7, 0x0d, 0x28,
+ 0x9e, 0xf2, 0xe4, 0x42, 0x10, 0x33, 0x6c, 0x81, 0x19, 0x47, 0xc5, 0x18, 0xb9, 0x0a, 0xe5, 0x20,
+ 0x74, 0x59, 0x68, 0x79, 0x2e, 0x62, 0x57, 0xa4, 0x2b, 0xf8, 0xdd, 0x71, 0xc9, 0x09, 0xac, 0x25,
+ 0x43, 0x56, 0x14, 0x4c, 0x43, 0x87, 0x35, 0x4a, 0x79, 0xc0, 0x12, 0xd7, 0xf6, 0xfa, 0x62, 0xca,
+ 0x00, 0xb5, 0x0e, 0xcb, 0x83, 0xe1, 0xc9, 0x49, 0xb7, 0x63, 0xb4, 0x69, 0x3d, 0xc8, 0x0e, 0x90,
+ 0xfb, 0xb0, 0x12, 0xc5, 0x41, 0xc8, 0x1d, 0x2e, 0xe6, 0xb9, 0x37, 0xb5, 0x34, 0x10, 0xe3, 0x87,
+ 0xcb, 0xed, 0xce, 0xe0, 0x31, 0x4d, 0xd4, 0x71, 0x2f, 0x3c, 0xfa, 0x8d, 0xf2, 0xdc, 0x5e, 0xb8,
+ 0x90, 0x8a, 0x31, 0xfd, 0x16, 0xd4, 0x73, 0x8e, 0xf0, 0x93, 0xa4, 0x6d, 0x3c, 0x6c, 0x0e, 0xbb,
+ 0xa6, 0xd1, 0xd6, 0x96, 0x48, 0x0d, 0x52, 0xcf, 0x34, 0x45, 0xdf, 0x80, 0x15, 0xb9, 0x18, 0x52,
+ 0x44, 0x67, 0xf0, 0x58, 0x5b, 0xd2, 0x7f, 0xaf, 0x00, 0x11, 0xf9, 0x3d, 0x60, 0xe1, 0x73, 0xcf,
+ 0x61, 0x46, 0x18, 0x06, 0xa1, 0xfe, 0x2b, 0x05, 0x2a, 0xf8, 0xab, 0x15, 0xb8, 0x8c, 0x94, 0x40,
+ 0xed, 0x3f, 0xd6, 0x96, 0xf8, 0xe9, 0xd5, 0xe9, 0x3d, 0x69, 0x76, 0x3b, 0x6d, 0x8b, 0x1a, 0x9f,
+ 0x0e, 0x8d, 0x81, 0xa9, 0x29, 0x5c, 0x68, 0xd2, 0x66, 0x6f, 0xd0, 0x31, 0x7a, 0xa6, 0x65, 0x50,
+ 0xda, 0xa7, 0x9a, 0xca, 0xcf, 0xbe, 0x4e, 0xcf, 0x34, 0x68, 0xaf, 0xd9, 0x95, 0xb2, 0x02, 0xd9,
+ 0x82, 0xf5, 0x13, 0x83, 0x1e, 0x77, 0x06, 0x83, 0x4e, 0xbf, 0x67, 0xb5, 0x8d, 0x1e, 0x77, 0x6b,
+ 0x99, 0x54, 0x61, 0xc5, 0xec, 0x1c, 0x1b, 0xfd, 0xa1, 0xa9, 0x15, 0xc9, 0x35, 0xd8, 0x6e, 0xf5,
+ 0x7b, 0xad, 0x21, 0xa5, 0xdc, 0x1a, 0xda, 0x6d, 0xb6, 0xcc, 0x4e, 0xbf, 0xa7, 0x95, 0xf4, 0x5f,
+ 0x2b, 0x50, 0xa7, 0xec, 0xcb, 0x29, 0x8b, 0xe2, 0x41, 0x6c, 0xc7, 0xd3, 0x88, 0x97, 0x95, 0x13,
+ 0xb8, 0x22, 0x97, 0x57, 0x0f, 0xde, 0x49, 0x4f, 0xc0, 0x0b, 0xfb, 0xd9, 0x4b, 0xf7, 0x42, 0x71,
+ 0x06, 0x2f, 0x2b, 0xc6, 0x45, 0x96, 0xcb, 0x62, 0xdb, 0x1b, 0xc9, 0x4e, 0xa0, 0x8a, 0xb2, 0x36,
+ 0x8a, 0xc8, 0x4d, 0x58, 0x75, 0x6c, 0x3f, 0xf0, 0x3d, 0x5e, 0xed, 0xb8, 0x4c, 0x01, 0xd3, 0xa5,
+ 0x9e, 0x4a, 0xb9, 0x3d, 0xfd, 0xeb, 0x02, 0x54, 0x04, 0x63, 0x4d, 0x98, 0xb3, 0xb0, 0xba, 0x8e,
+ 0xa1, 0xea, 0x04, 0x7e, 0xe4, 0x45, 0x31, 0xf3, 0x9d, 0x73, 0x79, 0x08, 0xff, 0x4f, 0xe2, 0x6c,
+ 0x3a, 0x97, 0x53, 0x40, 0xa2, 0x74, 0x58, 0x3b, 0x31, 0xa8, 0xd5, 0xee, 0xb7, 0x86, 0xc7, 0x46,
+ 0xcf, 0xa4, 0xd9, 0xf9, 0xe4, 0x3a, 0x54, 0xb8, 0xd9, 0x68, 0x62, 0x3b, 0x09, 0x1d, 0xcc, 0x04,
+ 0xd9, 0x62, 0x94, 0xd9, 0x9d, 0x14, 0xe3, 0x7d, 0x28, 0xc9, 0xa4, 0x9e, 0x4b, 0xc5, 0x99, 0x07,
+ 0x32, 0x9d, 0x4b, 0x03, 0xa3, 0x49, 0x5b, 0x8f, 0xa8, 0xd4, 0x27, 0xf7, 0x60, 0x79, 0xcc, 0xf7,
+ 0x2f, 0x8a, 0x61, 0xfb, 0xe2, 0xbc, 0xe3, 0xc0, 0x65, 0x87, 0xe5, 0x13, 0xda, 0xe9, 0xd3, 0x8e,
+ 0xf9, 0x94, 0xa2, 0xb6, 0xfe, 0xbf, 0x48, 0x4b, 0xa9, 0xdb, 0x00, 0xa5, 0xa3, 0x6e, 0xff, 0x41,
+ 0xb3, 0xab, 0x2d, 0xf1, 0xae, 0x20, 0xbb, 0x3f, 0x4d, 0xd1, 0x3f, 0x84, 0x92, 0x4c, 0x61, 0x00,
+ 0xb9, 0xbc, 0xb6, 0x84, 0xe9, 0xdc, 0x34, 0x9b, 0x03, 0xb3, 0x4f, 0x0d, 0xd1, 0x7e, 0xb5, 0xba,
+ 0xfd, 0x61, 0xdb, 0xe2, 0x82, 0xe6, 0x91, 0xa1, 0xa9, 0xfa, 0x3b, 0xb0, 0xcc, 0x17, 0xe7, 0x99,
+ 0x9e, 0x2c, 0xaf, 0x2d, 0x91, 0x55, 0x80, 0x07, 0xcd, 0xd6, 0x63, 0xde, 0x69, 0xf5, 0x78, 0xe6,
+ 0xff, 0x43, 0x81, 0x3a, 0x7a, 0x9b, 0x72, 0xd6, 0x3e, 0x80, 0xc7, 0x05, 0x56, 0x34, 0x61, 0x0e,
+ 0xa2, 0x55, 0x3d, 0x58, 0xbf, 0xb0, 0x31, 0x5a, 0xf1, 0x52, 0x64, 0x77, 0x13, 0x72, 0x11, 0xad,
+ 0x48, 0xfe, 0x64, 0xc4, 0x43, 0x30, 0x61, 0x98, 0x8f, 0x66, 0x45, 0x5f, 0xc0, 0xd6, 0x2c, 0x8f,
+ 0x75, 0xe2, 0x43, 0x52, 0xf9, 0x69, 0xcd, 0x5f, 0xfb, 0x64, 0x56, 0xa0, 0x6f, 0x41, 0xd5, 0x1e,
+ 0x07, 0x53, 0x3f, 0xb6, 0xa6, 0x11, 0x73, 0x25, 0xaf, 0x82, 0x10, 0x0d, 0x23, 0xe6, 0xf2, 0x8e,
+ 0x69, 0xe4, 0x8d, 0xbd, 0x58, 0x72, 0xa9, 0xf8, 0xd0, 0xbf, 0x52, 0x61, 0x03, 0x17, 0x49, 0xe8,
+ 0xe5, 0xc4, 0x0e, 0xed, 0x71, 0x44, 0x6e, 0x41, 0xd9, 0x95, 0x12, 0x3c, 0x38, 0xab, 0x07, 0xda,
+ 0x3c, 0x11, 0xd1, 0x54, 0x83, 0x3c, 0x81, 0xca, 0x69, 0xc8, 0xa2, 0x67, 0x3e, 0x8b, 0x22, 0x99,
+ 0xae, 0x37, 0x73, 0x5b, 0xc8, 0x5b, 0xdf, 0x7b, 0x98, 0x28, 0x1f, 0xd6, 0x07, 0x4f, 0x7b, 0xad,
+ 0x47, 0xb4, 0xdf, 0xeb, 0x0f, 0x07, 0xdd, 0xa7, 0x0f, 0xd4, 0x86, 0x42, 0x67, 0xa6, 0xe6, 0x82,
+ 0x5e, 0xb8, 0x3c, 0xe8, 0xfa, 0x5d, 0xa8, 0xa4, 0xc6, 0x39, 0xfc, 0x39, 0xf3, 0x82, 0x90, 0x3e,
+ 0x7b, 0x64, 0xf4, 0x78, 0x7b, 0xf9, 0x84, 0xf3, 0x09, 0xe6, 0xd2, 0x8f, 0x61, 0x33, 0xe7, 0xa5,
+ 0xe4, 0x0c, 0x72, 0x17, 0x4a, 0x13, 0x74, 0x58, 0xe2, 0xfd, 0xc6, 0x37, 0xec, 0x89, 0x4a, 0x55,
+ 0xb2, 0x05, 0x25, 0x7b, 0x32, 0xe1, 0x87, 0x05, 0xc7, 0xb2, 0x46, 0x8b, 0xf6, 0x64, 0xd2, 0x71,
+ 0xf5, 0x1f, 0xc2, 0xd6, 0xdc, 0x1a, 0xd1, 0x24, 0xf0, 0x23, 0x46, 0x6e, 0x43, 0x29, 0x42, 0x72,
+ 0x92, 0x71, 0xde, 0x4a, 0x16, 0xc9, 0x31, 0x17, 0x95, 0x4a, 0xdc, 0xbc, 0x1b, 0x38, 0xdc, 0x3c,
+ 0x4f, 0xab, 0x0a, 0x2d, 0xba, 0x81, 0xd3, 0x71, 0x75, 0x0b, 0x36, 0xdb, 0x6c, 0xc4, 0x62, 0x36,
+ 0x87, 0xe3, 0x4c, 0x5d, 0xc9, 0xa8, 0xcf, 0x05, 0x56, 0xfd, 0x16, 0x81, 0x75, 0x61, 0x2b, 0xbf,
+ 0x40, 0x12, 0xa4, 0x7b, 0x73, 0x41, 0xba, 0x9e, 0xe6, 0xc9, 0x02, 0x7f, 0x2e, 0x8b, 0xd2, 0x11,
+ 0x6c, 0xcf, 0xaf, 0xf2, 0x5a, 0x61, 0xd2, 0xff, 0xa6, 0xc0, 0x06, 0xbf, 0x28, 0x24, 0x76, 0x22,
+ 0x19, 0x8f, 0x57, 0x2f, 0xe3, 0x1d, 0xde, 0x4f, 0xd9, 0x61, 0x6c, 0xa5, 0x61, 0xe7, 0x04, 0x0a,
+ 0x28, 0x6b, 0xcb, 0x60, 0xae, 0x7b, 0xbe, 0x33, 0x9a, 0xba, 0xcc, 0x4a, 0x35, 0x71, 0x5b, 0xe5,
+ 0xc3, 0xe5, 0x38, 0x9c, 0x32, 0xba, 0x26, 0x87, 0x07, 0x72, 0x0e, 0xb9, 0x9a, 0xd4, 0x22, 0x32,
+ 0xee, 0x61, 0xe1, 0xce, 0xfe, 0xbe, 0x2c, 0x48, 0xf2, 0x06, 0x54, 0x7e, 0xc2, 0xce, 0x23, 0x2b,
+ 0xf0, 0x47, 0xe7, 0xc8, 0xbb, 0x65, 0x5a, 0xe6, 0x82, 0xbe, 0x3f, 0x3a, 0xe7, 0x89, 0x9a, 0xdb,
+ 0xd4, 0xa5, 0x89, 0xba, 0x20, 0x04, 0x0b, 0x20, 0x50, 0xb3, 0x10, 0xc4, 0xb0, 0x35, 0xb7, 0xc6,
+ 0x02, 0x04, 0xd4, 0xcb, 0x13, 0x35, 0xcb, 0x20, 0xea, 0x65, 0x0c, 0xa2, 0xff, 0x55, 0x85, 0x75,
+ 0xbe, 0x2c, 0x42, 0xc0, 0x12, 0xb4, 0xde, 0x86, 0xda, 0x29, 0x8b, 0x9d, 0x67, 0x56, 0xe4, 0x3c,
+ 0x63, 0x63, 0x1b, 0x59, 0xad, 0x4c, 0xab, 0x28, 0x1b, 0xa0, 0x88, 0x34, 0xb2, 0xb4, 0x56, 0x3c,
+ 0x54, 0x0f, 0xd2, 0x48, 0x7e, 0xf3, 0xb1, 0xb7, 0x0b, 0x9a, 0x00, 0x4b, 0xa4, 0x03, 0x9e, 0xc1,
+ 0xd8, 0x99, 0xd3, 0x55, 0x94, 0xa3, 0x23, 0xfc, 0xd2, 0x4a, 0xee, 0xc1, 0x46, 0x1e, 0x5e, 0x9c,
+ 0x21, 0xb0, 0x91, 0x00, 0xaf, 0x67, 0x01, 0xc6, 0x99, 0xe4, 0x03, 0x9e, 0x14, 0x89, 0x65, 0x6b,
+ 0x12, 0xb2, 0x53, 0xef, 0x05, 0x9e, 0x87, 0x15, 0x9e, 0x0e, 0xd2, 0xf6, 0x09, 0x8a, 0xc9, 0x36,
+ 0x94, 0x82, 0xd3, 0xd3, 0x88, 0xc5, 0x8d, 0x15, 0x3c, 0x81, 0xe5, 0x57, 0xe6, 0x00, 0x2e, 0xbf,
+ 0xda, 0x01, 0xac, 0x7f, 0x01, 0x24, 0x13, 0xcd, 0x24, 0x4d, 0xee, 0xcc, 0xa5, 0xc9, 0xd5, 0x6c,
+ 0x9a, 0xe4, 0x22, 0x7f, 0x59, 0x9d, 0x7e, 0x25, 0xcb, 0x2b, 0x5d, 0xe0, 0xf5, 0x72, 0xe4, 0x63,
+ 0x58, 0x15, 0x41, 0x1a, 0xcb, 0x23, 0x4e, 0x66, 0xca, 0xd6, 0xc2, 0xf3, 0x8f, 0xd6, 0xbd, 0xec,
+ 0xa7, 0xfe, 0x33, 0x05, 0x88, 0x60, 0x0b, 0x91, 0x0b, 0x32, 0x69, 0x66, 0x51, 0x53, 0x5e, 0xb1,
+ 0x6d, 0x99, 0x67, 0xc5, 0xc2, 0xa5, 0xac, 0xf8, 0x23, 0xd8, 0xc8, 0x7a, 0x90, 0x04, 0xfa, 0x60,
+ 0x2e, 0xd0, 0xd7, 0xf2, 0x9c, 0x98, 0x75, 0xf7, 0xb2, 0x48, 0x1b, 0x09, 0xb1, 0x27, 0x2b, 0xbc,
+ 0x1e, 0x1f, 0xfe, 0x59, 0x81, 0xf2, 0x20, 0x08, 0x63, 0xa4, 0xb4, 0xf7, 0x60, 0x2d, 0x0a, 0xc2,
+ 0xd8, 0x62, 0x2f, 0x26, 0x21, 0x8b, 0xe4, 0x3d, 0x4c, 0xc5, 0xd4, 0x0f, 0xc2, 0xd8, 0x48, 0xa5,
+ 0xe4, 0xb6, 0x54, 0x74, 0x59, 0xe4, 0x30, 0xdf, 0xf5, 0xfc, 0x33, 0x2c, 0xb3, 0x24, 0xed, 0x51,
+ 0xbd, 0x9d, 0x8e, 0x91, 0x5b, 0x40, 0x5c, 0x76, 0x6a, 0x4f, 0x47, 0xb1, 0xb8, 0x7b, 0x5a, 0x31,
+ 0x7b, 0x11, 0xcb, 0xaa, 0xd2, 0xe4, 0x08, 0x5e, 0x0e, 0x4d, 0xf6, 0x82, 0x07, 0x69, 0x2b, 0xaf,
+ 0xed, 0x4f, 0xc7, 0x2c, 0xf4, 0x1c, 0xac, 0x2c, 0x85, 0x6e, 0x64, 0x27, 0xf4, 0xc4, 0x90, 0xfe,
+ 0x77, 0x05, 0x60, 0xe0, 0x04, 0x21, 0x0b, 0x71, 0x23, 0xdf, 0x83, 0x52, 0x84, 0x5f, 0x12, 0xea,
+ 0xab, 0x99, 0x27, 0x2d, 0xa9, 0x23, 0x7f, 0x1e, 0xd6, 0x8e, 0x9b, 0x66, 0xeb, 0x91, 0x35, 0x68,
+ 0xf5, 0xa9, 0x41, 0xa9, 0x9c, 0x46, 0xae, 0xe5, 0xd9, 0x63, 0xf9, 0xce, 0xfe, 0x8c, 0x89, 0x3f,
+ 0x84, 0x2b, 0x63, 0x5b, 0x90, 0x0f, 0xd7, 0xb5, 0x10, 0x27, 0x16, 0xb3, 0x30, 0x6a, 0x54, 0x70,
+ 0x4b, 0x5b, 0x38, 0x2c, 0xec, 0x9f, 0xa4, 0x83, 0xd8, 0x99, 0x26, 0xd6, 0xb7, 0xa9, 0xc1, 0x57,
+ 0xec, 0xf4, 0x8e, 0xac, 0xec, 0xfa, 0xa2, 0xa3, 0xcd, 0x49, 0x54, 0xfd, 0xb7, 0x0a, 0x54, 0xb0,
+ 0x37, 0x9c, 0xbb, 0x17, 0x14, 0xd2, 0x7b, 0xc1, 0xc7, 0x00, 0x19, 0xc8, 0x78, 0x7e, 0xc2, 0xec,
+ 0xb8, 0x4d, 0xa7, 0xee, 0xcd, 0x00, 0xa4, 0x19, 0xfd, 0x6b, 0x9f, 0x00, 0x64, 0xa0, 0x4d, 0xec,
+ 0x17, 0x32, 0xf7, 0x8e, 0x37, 0x73, 0xf6, 0x97, 0x71, 0x24, 0x23, 0xd1, 0x1f, 0xc9, 0x27, 0x0a,
+ 0x6a, 0xfb, 0x67, 0x2c, 0xe3, 0xa1, 0x92, 0x5a, 0xd8, 0x84, 0x22, 0x72, 0x64, 0xf2, 0x50, 0x8a,
+ 0x1f, 0x44, 0x83, 0x02, 0xf3, 0x5d, 0xc9, 0xc1, 0xfc, 0xa7, 0xfe, 0x73, 0x05, 0xd6, 0x85, 0x29,
+ 0x91, 0xad, 0x18, 0x3e, 0xde, 0xc3, 0x8a, 0x4c, 0x10, 0x98, 0x28, 0x48, 0x86, 0x80, 0xa2, 0x2e,
+ 0x42, 0xb2, 0x0b, 0xc5, 0x90, 0xaf, 0x7d, 0xa1, 0xa5, 0x4e, 0xbd, 0xa2, 0x42, 0x81, 0xbc, 0x0f,
+ 0x9a, 0x30, 0xc5, 0x2f, 0x42, 0x71, 0x68, 0x7b, 0x7e, 0x8c, 0x97, 0xfc, 0x0a, 0x5d, 0x43, 0x79,
+ 0x2b, 0x15, 0xeb, 0xdf, 0x81, 0x4d, 0x9c, 0xdf, 0x9c, 0xc6, 0x41, 0x9b, 0xc5, 0xcc, 0x91, 0xde,
+ 0xdc, 0x58, 0xe0, 0xcd, 0xa1, 0x7a, 0x67, 0x3f, 0xeb, 0x91, 0x3e, 0x84, 0x5a, 0x76, 0x1f, 0x0b,
+ 0xaf, 0x73, 0x33, 0xda, 0x55, 0xb1, 0xbb, 0xbf, 0x9a, 0x77, 0x3b, 0x13, 0x81, 0x84, 0x0c, 0xf4,
+ 0xaf, 0x15, 0x58, 0x93, 0xa3, 0xa7, 0x9e, 0xcf, 0xb0, 0xc9, 0x5e, 0x64, 0x7a, 0xe1, 0xc3, 0x34,
+ 0xb9, 0x9b, 0x84, 0x69, 0xee, 0x36, 0x31, 0x67, 0x71, 0x2f, 0x1b, 0xb1, 0x6b, 0xff, 0x07, 0x45,
+ 0x81, 0x6b, 0x8a, 0xa1, 0xb2, 0x00, 0x43, 0x75, 0x86, 0xe1, 0x1f, 0x57, 0xa0, 0x26, 0x2e, 0xce,
+ 0xaf, 0xdd, 0x5b, 0x6d, 0x42, 0xf1, 0xcb, 0x29, 0x0b, 0xcf, 0xb1, 0x03, 0xad, 0x50, 0xf1, 0xc1,
+ 0x8f, 0x43, 0x67, 0x1a, 0x46, 0x41, 0x28, 0xa9, 0x43, 0x7e, 0x65, 0x8e, 0xc9, 0x6a, 0xee, 0x98,
+ 0x7c, 0x08, 0x55, 0xa1, 0x61, 0xe1, 0x93, 0x99, 0xb8, 0xac, 0xbe, 0x95, 0xbf, 0xdb, 0xcb, 0x8b,
+ 0x47, 0x0b, 0xf5, 0xc4, 0x9b, 0x59, 0xaf, 0xdf, 0x33, 0x28, 0x38, 0xa9, 0x64, 0xd6, 0x4a, 0x94,
+ 0xe6, 0x5b, 0x89, 0x7b, 0xb0, 0x8d, 0xb5, 0xce, 0x5c, 0xcb, 0xc1, 0x3b, 0x96, 0xed, 0x38, 0xd3,
+ 0xd0, 0x76, 0xce, 0xe5, 0x81, 0xbd, 0x29, 0x47, 0x5b, 0x7c, 0xb0, 0x29, 0xc7, 0xc8, 0x6d, 0xa8,
+ 0x20, 0x7b, 0x62, 0x38, 0xca, 0xf9, 0x16, 0x28, 0xe1, 0x62, 0x5a, 0x8e, 0x12, 0x56, 0xbe, 0x0b,
+ 0x55, 0xc9, 0x34, 0x38, 0xa1, 0x82, 0xd8, 0x91, 0x8b, 0x8c, 0x46, 0x21, 0x9a, 0x31, 0xe0, 0x3e,
+ 0x00, 0xde, 0x21, 0xc5, 0x1c, 0xc0, 0x39, 0xeb, 0x17, 0x28, 0x81, 0x56, 0x4e, 0x53, 0x62, 0xc9,
+ 0x35, 0x98, 0xb5, 0x7c, 0x83, 0x49, 0x1e, 0x43, 0x6d, 0x62, 0x87, 0x91, 0xe7, 0x9f, 0x59, 0x78,
+ 0x81, 0xaf, 0x63, 0x2c, 0x77, 0x16, 0xc6, 0xf2, 0x44, 0x28, 0xe2, 0x55, 0xbe, 0x34, 0x30, 0x69,
+ 0xa7, 0x65, 0xd2, 0xea, 0x64, 0x26, 0x24, 0x1f, 0xc3, 0x55, 0x7b, 0x1a, 0x07, 0x96, 0xeb, 0x45,
+ 0x4e, 0xf0, 0x9c, 0x85, 0x16, 0xbe, 0x41, 0x89, 0x08, 0x36, 0xd6, 0x30, 0xc6, 0xca, 0x3e, 0xdd,
+ 0xe6, 0x3a, 0x6d, 0xa9, 0x82, 0x19, 0x8a, 0x51, 0x24, 0xff, 0x0f, 0xf5, 0xa4, 0xed, 0x12, 0xef,
+ 0x5a, 0x1a, 0x46, 0x70, 0x73, 0x51, 0xf1, 0xd0, 0x9a, 0x54, 0x15, 0x2f, 0x96, 0x0f, 0x40, 0x13,
+ 0x4b, 0x85, 0x69, 0xae, 0x37, 0xd6, 0x71, 0xf6, 0x95, 0x97, 0x94, 0x02, 0x5d, 0x3b, 0x9d, 0xab,
+ 0xb6, 0x01, 0x5c, 0x11, 0x36, 0xc4, 0x16, 0x90, 0x17, 0xc4, 0x11, 0xd0, 0x20, 0x18, 0xe5, 0xeb,
+ 0x39, 0x53, 0x73, 0xe4, 0x41, 0x37, 0x4f, 0x17, 0x51, 0xca, 0x4d, 0xa8, 0x0a, 0xa3, 0x2e, 0x9b,
+ 0xc4, 0xcf, 0x1a, 0x1b, 0x99, 0x43, 0x07, 0x70, 0xa0, 0xcd, 0xe5, 0xfa, 0x01, 0xc0, 0x2c, 0x51,
+ 0x49, 0x19, 0x30, 0x55, 0xb5, 0x25, 0x7c, 0xe9, 0xe8, 0xf4, 0x8e, 0xba, 0x86, 0xa6, 0x90, 0x55,
+ 0x80, 0x13, 0x83, 0x5a, 0xd4, 0x18, 0x0c, 0xbb, 0xa6, 0xa6, 0xea, 0xef, 0x42, 0x35, 0x03, 0x08,
+ 0xaa, 0x22, 0x24, 0xda, 0x12, 0xa9, 0xc2, 0x0a, 0x35, 0xba, 0xcd, 0xcf, 0xf1, 0x4d, 0xcf, 0x84,
+ 0xba, 0x40, 0x31, 0x61, 0xac, 0x5b, 0x73, 0xbd, 0xca, 0xe6, 0x22, 0xb0, 0x2f, 0xeb, 0x52, 0xa6,
+ 0xa0, 0xc9, 0x88, 0x46, 0xc9, 0x91, 0xfd, 0x32, 0xbe, 0x12, 0xf0, 0xe3, 0x4b, 0x3b, 0x15, 0x1f,
+ 0xe4, 0x23, 0x80, 0x0c, 0x52, 0xe2, 0x9a, 0xff, 0x52, 0xa4, 0x32, 0xaa, 0xfa, 0xa7, 0x50, 0xcd,
+ 0x2c, 0xbb, 0x70, 0xc5, 0xbd, 0x19, 0x43, 0xf2, 0x04, 0x68, 0xcc, 0x99, 0x4d, 0xdd, 0x4d, 0xde,
+ 0xab, 0x7f, 0xa3, 0x24, 0xac, 0x26, 0x8d, 0xe6, 0x5f, 0x42, 0xd4, 0x4b, 0x5e, 0x42, 0x6e, 0xcf,
+ 0x1d, 0xa1, 0x0b, 0x9e, 0x95, 0x33, 0x0a, 0xc8, 0xb5, 0xbc, 0x98, 0xd1, 0x3b, 0x85, 0x8a, 0x8f,
+ 0x0c, 0x01, 0x16, 0xb2, 0x04, 0xa8, 0xff, 0x4b, 0x81, 0xd5, 0xd4, 0x37, 0xd1, 0x06, 0xde, 0x82,
+ 0x52, 0x88, 0x7e, 0xca, 0x36, 0x70, 0x0e, 0x3d, 0xb1, 0x07, 0x2a, 0x75, 0xc8, 0x0d, 0xa8, 0xe7,
+ 0x78, 0x0c, 0x61, 0x28, 0xd0, 0x5a, 0x96, 0xbe, 0x32, 0x9d, 0x65, 0xe1, 0xdb, 0xf4, 0xf0, 0x2f,
+ 0x63, 0xeb, 0x0f, 0xa1, 0x96, 0x14, 0x21, 0xfa, 0x57, 0x44, 0xff, 0x36, 0x16, 0xc4, 0x9f, 0x56,
+ 0x4f, 0x67, 0x1f, 0x1f, 0x94, 0xca, 0xff, 0x5c, 0xd1, 0x7e, 0xd9, 0x7b, 0x50, 0xfe, 0x81, 0xfc,
+ 0xbf, 0xf6, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x12, 0xcb, 0x31, 0xc6, 0x1d, 0x00, 0x00,
+}
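
The generated getters above are nil-safe and fall back to the proto2 defaults baked in as Default_* constants (for example Default_SearchParams_Limit = 20 and Default_SearchParams_FacetDepth = 1000). A minimal, illustrative sketch of that behaviour follows; it is not part of the vendored file, the package lives under appengine/internal so it is not normally importable from outside appengine, and the index name and query used here are invented for the example.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	search "google.golang.org/appengine/internal/search"
)

func main() {
	// Getters are safe on a nil message and return the proto2 defaults.
	var p *search.SearchParams
	fmt.Println(p.GetLimit())      // 20 (Default_SearchParams_Limit)
	fmt.Println(p.GetFacetDepth()) // 1000 (Default_SearchParams_FacetDepth)

	// Scalar fields are pointers; the proto helpers build them.
	req := &search.SearchRequest{
		Params: &search.SearchParams{
			IndexSpec: &search.IndexSpec{Name: proto.String("docs")}, // hypothetical index name
			Query:     proto.String("golang"),                        // hypothetical query
			Limit:     proto.Int32(5),
		},
	}
	buf, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(buf), "bytes on the wire")
}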
diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto
new file mode 100644
index 000000000..61df6508b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.proto
@@ -0,0 +1,394 @@
+syntax = "proto2";
+option go_package = "search";
+
+package search;
+
+message Scope {
+ enum Type {
+ USER_BY_CANONICAL_ID = 1;
+ USER_BY_EMAIL = 2;
+ GROUP_BY_CANONICAL_ID = 3;
+ GROUP_BY_EMAIL = 4;
+ GROUP_BY_DOMAIN = 5;
+ ALL_USERS = 6;
+ ALL_AUTHENTICATED_USERS = 7;
+ }
+
+ optional Type type = 1;
+ optional string value = 2;
+}
+
+message Entry {
+ enum Permission {
+ READ = 1;
+ WRITE = 2;
+ FULL_CONTROL = 3;
+ }
+
+ optional Scope scope = 1;
+ optional Permission permission = 2;
+ optional string display_name = 3;
+}
+
+message AccessControlList {
+ optional string owner = 1;
+ repeated Entry entries = 2;
+}
+
+message FieldValue {
+ enum ContentType {
+ TEXT = 0;
+ HTML = 1;
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ GEO = 5;
+ }
+
+ optional ContentType type = 1 [default = TEXT];
+
+ optional string language = 2 [default = "en"];
+
+ optional string string_value = 3;
+
+ optional group Geo = 4 {
+ required double lat = 5;
+ required double lng = 6;
+ }
+}
+
+message Field {
+ required string name = 1;
+ required FieldValue value = 2;
+}
+
+message FieldTypes {
+ required string name = 1;
+ repeated FieldValue.ContentType type = 2;
+}
+
+message IndexShardSettings {
+ repeated int32 prev_num_shards = 1;
+ required int32 num_shards = 2 [default=1];
+ repeated int32 prev_num_shards_search_false = 3;
+ optional string local_replica = 4 [default = ""];
+}
+
+message FacetValue {
+ enum ContentType {
+ ATOM = 2;
+ NUMBER = 4;
+ }
+
+ optional ContentType type = 1 [default = ATOM];
+ optional string string_value = 3;
+}
+
+message Facet {
+ required string name = 1;
+ required FacetValue value = 2;
+}
+
+message DocumentMetadata {
+ optional int64 version = 1;
+ optional int64 committed_st_version = 2;
+}
+
+message Document {
+ optional string id = 1;
+ optional string language = 2 [default = "en"];
+ repeated Field field = 3;
+ optional int32 order_id = 4;
+ optional OrderIdSource order_id_source = 6 [default = SUPPLIED];
+
+ enum OrderIdSource {
+ DEFAULTED = 0;
+ SUPPLIED = 1;
+ }
+
+ enum Storage {
+ DISK = 0;
+ }
+
+ optional Storage storage = 5 [default = DISK];
+ repeated Facet facet = 8;
+}
+
+message SearchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ PERMISSION_DENIED = 4;
+ TIMEOUT = 5;
+ CONCURRENT_TRANSACTION = 6;
+ }
+}
+
+message RequestStatus {
+ required SearchServiceError.ErrorCode code = 1;
+ optional string error_detail = 2;
+ optional int32 canonical_code = 3;
+}
+
+message IndexSpec {
+ required string name = 1;
+
+ enum Consistency {
+ GLOBAL = 0;
+ PER_DOCUMENT = 1;
+ }
+ optional Consistency consistency = 2 [default = PER_DOCUMENT];
+
+ optional string namespace = 3;
+ optional int32 version = 4;
+
+ enum Source {
+ SEARCH = 0;
+ DATASTORE = 1;
+ CLOUD_STORAGE = 2;
+ }
+ optional Source source = 5 [default = SEARCH];
+
+ enum Mode {
+ PRIORITY = 0;
+ BACKGROUND = 1;
+ }
+ optional Mode mode = 6 [default = PRIORITY];
+}
+
+message IndexMetadata {
+ required IndexSpec index_spec = 1;
+
+ repeated FieldTypes field = 2;
+
+ message Storage {
+ optional int64 amount_used = 1;
+ optional int64 limit = 2;
+ }
+ optional Storage storage = 3;
+}
+
+message IndexDocumentParams {
+ repeated Document document = 1;
+
+ enum Freshness {
+ SYNCHRONOUSLY = 0;
+ WHEN_CONVENIENT = 1;
+ }
+ optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true];
+
+ required IndexSpec index_spec = 3;
+}
+
+message IndexDocumentRequest {
+ required IndexDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message IndexDocumentResponse {
+ repeated RequestStatus status = 1;
+
+ repeated string doc_id = 2;
+}
+
+message DeleteDocumentParams {
+ repeated string doc_id = 1;
+
+ required IndexSpec index_spec = 2;
+}
+
+message DeleteDocumentRequest {
+ required DeleteDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteDocumentResponse {
+ repeated RequestStatus status = 1;
+}
+
+message ListDocumentsParams {
+ required IndexSpec index_spec = 1;
+ optional string start_doc_id = 2;
+ optional bool include_start_doc = 3 [default = true];
+ optional int32 limit = 4 [default = 100];
+ optional bool keys_only = 5;
+}
+
+message ListDocumentsRequest {
+ required ListDocumentsParams params = 1;
+
+ optional bytes app_id = 2;
+}
+
+message ListDocumentsResponse {
+ required RequestStatus status = 1;
+
+ repeated Document document = 2;
+}
+
+message ListIndexesParams {
+ optional bool fetch_schema = 1;
+ optional int32 limit = 2 [default = 20];
+ optional string namespace = 3;
+ optional string start_index_name = 4;
+ optional bool include_start_index = 5 [default = true];
+ optional string index_name_prefix = 6;
+ optional int32 offset = 7;
+ optional IndexSpec.Source source = 8 [default = SEARCH];
+}
+
+message ListIndexesRequest {
+ required ListIndexesParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message ListIndexesResponse {
+ required RequestStatus status = 1;
+ repeated IndexMetadata index_metadata = 2;
+}
+
+message DeleteSchemaParams {
+ optional IndexSpec.Source source = 1 [default = SEARCH];
+ repeated IndexSpec index_spec = 2;
+}
+
+message DeleteSchemaRequest {
+ required DeleteSchemaParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteSchemaResponse {
+ repeated RequestStatus status = 1;
+}
+
+message SortSpec {
+ required string sort_expression = 1;
+ optional bool sort_descending = 2 [default = true];
+ optional string default_value_text = 4;
+ optional double default_value_numeric = 5;
+}
+
+message ScorerSpec {
+ enum Scorer {
+ RESCORING_MATCH_SCORER = 0;
+ MATCH_SCORER = 2;
+ }
+ optional Scorer scorer = 1 [default = MATCH_SCORER];
+
+ optional int32 limit = 2 [default = 1000];
+ optional string match_scorer_parameters = 9;
+}
+
+message FieldSpec {
+ repeated string name = 1;
+
+ repeated group Expression = 2 {
+ required string name = 3;
+ required string expression = 4;
+ }
+}
+
+message FacetRange {
+ optional string name = 1;
+ optional string start = 2;
+ optional string end = 3;
+}
+
+message FacetRequestParam {
+ optional int32 value_limit = 1;
+ repeated FacetRange range = 2;
+ repeated string value_constraint = 3;
+}
+
+message FacetAutoDetectParam {
+ optional int32 value_limit = 1 [default = 10];
+}
+
+message FacetRequest {
+ required string name = 1;
+ optional FacetRequestParam params = 2;
+}
+
+message FacetRefinement {
+ required string name = 1;
+ optional string value = 2;
+
+ message Range {
+ optional string start = 1;
+ optional string end = 2;
+ }
+ optional Range range = 3;
+}
+
+message SearchParams {
+ required IndexSpec index_spec = 1;
+ required string query = 2;
+ optional string cursor = 4;
+ optional int32 offset = 11;
+
+ enum CursorType {
+ NONE = 0;
+ SINGLE = 1;
+ PER_RESULT = 2;
+ }
+ optional CursorType cursor_type = 5 [default = NONE];
+
+ optional int32 limit = 6 [default = 20];
+ optional int32 matched_count_accuracy = 7;
+ repeated SortSpec sort_spec = 8;
+ optional ScorerSpec scorer_spec = 9;
+ optional FieldSpec field_spec = 10;
+ optional bool keys_only = 12;
+
+ enum ParsingMode {
+ STRICT = 0;
+ RELAXED = 1;
+ }
+ optional ParsingMode parsing_mode = 13 [default = STRICT];
+
+ optional int32 auto_discover_facet_count = 15 [default = 0];
+ repeated FacetRequest include_facet = 16;
+ repeated FacetRefinement facet_refinement = 17;
+ optional FacetAutoDetectParam facet_auto_detect_param = 18;
+ optional int32 facet_depth = 19 [default=1000];
+}
+
+message SearchRequest {
+ required SearchParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message FacetResultValue {
+ required string name = 1;
+ required int32 count = 2;
+ required FacetRefinement refinement = 3;
+}
+
+message FacetResult {
+ required string name = 1;
+ repeated FacetResultValue value = 2;
+}
+
+message SearchResult {
+ required Document document = 1;
+ repeated Field expression = 4;
+ repeated double score = 2;
+ optional string cursor = 3;
+}
+
+message SearchResponse {
+ repeated SearchResult result = 1;
+ required int64 matched_count = 2;
+ required RequestStatus status = 3;
+ optional string cursor = 4;
+ repeated FacetResult facet_result = 5;
+
+ extensions 1000 to 9999;
+}
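
The repeated group Expression inside FieldSpec above is a proto2 legacy construct; in the generated Go earlier in this diff it surfaces as the nested FieldSpec_Expression struct. A short illustrative sketch of populating that shape, again assuming the internal package were importable; the field name and expression text are invented for the example.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	search "google.golang.org/appengine/internal/search"
)

func main() {
	// One named field plus one computed expression, mirroring FieldSpec's shape.
	fs := &search.FieldSpec{
		Name: []string{"price"},
		Expression: []*search.FieldSpec_Expression{
			{
				Name:       proto.String("discounted"),  // hypothetical expression name
				Expression: proto.String("price * 0.9"), // hypothetical expression text
			},
		},
	}
	fmt.Println(proto.CompactTextString(fs))
}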
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
new file mode 100644
index 000000000..60628ec9b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
@@ -0,0 +1,1858 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/socket/socket_service.proto
+// DO NOT EDIT!
+
+/*
+Package socket is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/socket/socket_service.proto
+
+It has these top-level messages:
+ RemoteSocketServiceError
+ AddressPort
+ CreateSocketRequest
+ CreateSocketReply
+ BindRequest
+ BindReply
+ GetSocketNameRequest
+ GetSocketNameReply
+ GetPeerNameRequest
+ GetPeerNameReply
+ SocketOption
+ SetSocketOptionsRequest
+ SetSocketOptionsReply
+ GetSocketOptionsRequest
+ GetSocketOptionsReply
+ ConnectRequest
+ ConnectReply
+ ListenRequest
+ ListenReply
+ AcceptRequest
+ AcceptReply
+ ShutDownRequest
+ ShutDownReply
+ CloseRequest
+ CloseReply
+ SendRequest
+ SendReply
+ ReceiveRequest
+ ReceiveReply
+ PollEvent
+ PollRequest
+ PollReply
+ ResolveRequest
+ ResolveReply
+*/
+package socket
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RemoteSocketServiceError_ErrorCode int32
+
+const (
+ RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1
+ RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2
+ RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4
+ RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5
+ RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6
+ RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7
+)
+
+var RemoteSocketServiceError_ErrorCode_name = map[int32]string{
+ 1: "SYSTEM_ERROR",
+ 2: "GAI_ERROR",
+ 4: "FAILURE",
+ 5: "PERMISSION_DENIED",
+ 6: "INVALID_REQUEST",
+ 7: "SOCKET_CLOSED",
+}
+var RemoteSocketServiceError_ErrorCode_value = map[string]int32{
+ "SYSTEM_ERROR": 1,
+ "GAI_ERROR": 2,
+ "FAILURE": 4,
+ "PERMISSION_DENIED": 5,
+ "INVALID_REQUEST": 6,
+ "SOCKET_CLOSED": 7,
+}
+
+func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode {
+ p := new(RemoteSocketServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_ErrorCode) String() string {
+ return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x))
+}
+func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_ErrorCode(value)
+ return nil
+}
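
As a quick illustration of the enum helpers just defined, the sketch below round-trips RemoteSocketServiceError_ErrorCode through String and UnmarshalJSON; it is illustrative only and assumes the internal socket package were importable.

package main

import (
	"fmt"

	socket "google.golang.org/appengine/internal/socket"
)

func main() {
	// String() maps the numeric value back to its proto enum name.
	code := socket.RemoteSocketServiceError_PERMISSION_DENIED
	fmt.Println(code.String()) // PERMISSION_DENIED

	// UnmarshalJSON accepts the quoted enum name (via proto.UnmarshalJSONEnum).
	var parsed socket.RemoteSocketServiceError_ErrorCode
	if err := parsed.UnmarshalJSON([]byte(`"FAILURE"`)); err != nil {
		panic(err)
	}
	fmt.Println(parsed == socket.RemoteSocketServiceError_FAILURE) // true
}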
+
+type RemoteSocketServiceError_SystemError int32
+
+const (
+ RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0
+ RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1
+ RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2
+ RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3
+ RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4
+ RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5
+ RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6
+ RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7
+ RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8
+ RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9
+ RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10
+ RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12
+ RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13
+ RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14
+ RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15
+ RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16
+ RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17
+ RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18
+ RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19
+ RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20
+ RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21
+ RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22
+ RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23
+ RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24
+ RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25
+ RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26
+ RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27
+ RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28
+ RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29
+ RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30
+ RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31
+ RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32
+ RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33
+ RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34
+ RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36
+ RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37
+ RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38
+ RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39
+ RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40
+ RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42
+ RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43
+ RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44
+ RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45
+ RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46
+ RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47
+ RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48
+ RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49
+ RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50
+ RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51
+ RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52
+ RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53
+ RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54
+ RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55
+ RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56
+ RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57
+ RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59
+ RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60
+ RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61
+ RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62
+ RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63
+ RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64
+ RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65
+ RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66
+ RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67
+ RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68
+ RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69
+ RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70
+ RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71
+ RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72
+ RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73
+ RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74
+ RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75
+ RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76
+ RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77
+ RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78
+ RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79
+ RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80
+ RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81
+ RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82
+ RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83
+ RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84
+ RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85
+ RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86
+ RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87
+ RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88
+ RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89
+ RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90
+ RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91
+ RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92
+ RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93
+ RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94
+ RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96
+ RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97
+ RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98
+ RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99
+ RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100
+ RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101
+ RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102
+ RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103
+ RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104
+ RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105
+ RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106
+ RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107
+ RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108
+ RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109
+ RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110
+ RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111
+ RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112
+ RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113
+ RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114
+ RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115
+ RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116
+ RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117
+ RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118
+ RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119
+ RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120
+ RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121
+ RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122
+ RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123
+ RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124
+ RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125
+ RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126
+ RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127
+ RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128
+ RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129
+ RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130
+ RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131
+ RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132
+)
+
+var RemoteSocketServiceError_SystemError_name = map[int32]string{
+ 0: "SYS_SUCCESS",
+ 1: "SYS_EPERM",
+ 2: "SYS_ENOENT",
+ 3: "SYS_ESRCH",
+ 4: "SYS_EINTR",
+ 5: "SYS_EIO",
+ 6: "SYS_ENXIO",
+ 7: "SYS_E2BIG",
+ 8: "SYS_ENOEXEC",
+ 9: "SYS_EBADF",
+ 10: "SYS_ECHILD",
+ 11: "SYS_EAGAIN",
+ // Duplicate value: 11: "SYS_EWOULDBLOCK",
+ 12: "SYS_ENOMEM",
+ 13: "SYS_EACCES",
+ 14: "SYS_EFAULT",
+ 15: "SYS_ENOTBLK",
+ 16: "SYS_EBUSY",
+ 17: "SYS_EEXIST",
+ 18: "SYS_EXDEV",
+ 19: "SYS_ENODEV",
+ 20: "SYS_ENOTDIR",
+ 21: "SYS_EISDIR",
+ 22: "SYS_EINVAL",
+ 23: "SYS_ENFILE",
+ 24: "SYS_EMFILE",
+ 25: "SYS_ENOTTY",
+ 26: "SYS_ETXTBSY",
+ 27: "SYS_EFBIG",
+ 28: "SYS_ENOSPC",
+ 29: "SYS_ESPIPE",
+ 30: "SYS_EROFS",
+ 31: "SYS_EMLINK",
+ 32: "SYS_EPIPE",
+ 33: "SYS_EDOM",
+ 34: "SYS_ERANGE",
+ 35: "SYS_EDEADLK",
+ // Duplicate value: 35: "SYS_EDEADLOCK",
+ 36: "SYS_ENAMETOOLONG",
+ 37: "SYS_ENOLCK",
+ 38: "SYS_ENOSYS",
+ 39: "SYS_ENOTEMPTY",
+ 40: "SYS_ELOOP",
+ 42: "SYS_ENOMSG",
+ 43: "SYS_EIDRM",
+ 44: "SYS_ECHRNG",
+ 45: "SYS_EL2NSYNC",
+ 46: "SYS_EL3HLT",
+ 47: "SYS_EL3RST",
+ 48: "SYS_ELNRNG",
+ 49: "SYS_EUNATCH",
+ 50: "SYS_ENOCSI",
+ 51: "SYS_EL2HLT",
+ 52: "SYS_EBADE",
+ 53: "SYS_EBADR",
+ 54: "SYS_EXFULL",
+ 55: "SYS_ENOANO",
+ 56: "SYS_EBADRQC",
+ 57: "SYS_EBADSLT",
+ 59: "SYS_EBFONT",
+ 60: "SYS_ENOSTR",
+ 61: "SYS_ENODATA",
+ 62: "SYS_ETIME",
+ 63: "SYS_ENOSR",
+ 64: "SYS_ENONET",
+ 65: "SYS_ENOPKG",
+ 66: "SYS_EREMOTE",
+ 67: "SYS_ENOLINK",
+ 68: "SYS_EADV",
+ 69: "SYS_ESRMNT",
+ 70: "SYS_ECOMM",
+ 71: "SYS_EPROTO",
+ 72: "SYS_EMULTIHOP",
+ 73: "SYS_EDOTDOT",
+ 74: "SYS_EBADMSG",
+ 75: "SYS_EOVERFLOW",
+ 76: "SYS_ENOTUNIQ",
+ 77: "SYS_EBADFD",
+ 78: "SYS_EREMCHG",
+ 79: "SYS_ELIBACC",
+ 80: "SYS_ELIBBAD",
+ 81: "SYS_ELIBSCN",
+ 82: "SYS_ELIBMAX",
+ 83: "SYS_ELIBEXEC",
+ 84: "SYS_EILSEQ",
+ 85: "SYS_ERESTART",
+ 86: "SYS_ESTRPIPE",
+ 87: "SYS_EUSERS",
+ 88: "SYS_ENOTSOCK",
+ 89: "SYS_EDESTADDRREQ",
+ 90: "SYS_EMSGSIZE",
+ 91: "SYS_EPROTOTYPE",
+ 92: "SYS_ENOPROTOOPT",
+ 93: "SYS_EPROTONOSUPPORT",
+ 94: "SYS_ESOCKTNOSUPPORT",
+ 95: "SYS_EOPNOTSUPP",
+ // Duplicate value: 95: "SYS_ENOTSUP",
+ 96: "SYS_EPFNOSUPPORT",
+ 97: "SYS_EAFNOSUPPORT",
+ 98: "SYS_EADDRINUSE",
+ 99: "SYS_EADDRNOTAVAIL",
+ 100: "SYS_ENETDOWN",
+ 101: "SYS_ENETUNREACH",
+ 102: "SYS_ENETRESET",
+ 103: "SYS_ECONNABORTED",
+ 104: "SYS_ECONNRESET",
+ 105: "SYS_ENOBUFS",
+ 106: "SYS_EISCONN",
+ 107: "SYS_ENOTCONN",
+ 108: "SYS_ESHUTDOWN",
+ 109: "SYS_ETOOMANYREFS",
+ 110: "SYS_ETIMEDOUT",
+ 111: "SYS_ECONNREFUSED",
+ 112: "SYS_EHOSTDOWN",
+ 113: "SYS_EHOSTUNREACH",
+ 114: "SYS_EALREADY",
+ 115: "SYS_EINPROGRESS",
+ 116: "SYS_ESTALE",
+ 117: "SYS_EUCLEAN",
+ 118: "SYS_ENOTNAM",
+ 119: "SYS_ENAVAIL",
+ 120: "SYS_EISNAM",
+ 121: "SYS_EREMOTEIO",
+ 122: "SYS_EDQUOT",
+ 123: "SYS_ENOMEDIUM",
+ 124: "SYS_EMEDIUMTYPE",
+ 125: "SYS_ECANCELED",
+ 126: "SYS_ENOKEY",
+ 127: "SYS_EKEYEXPIRED",
+ 128: "SYS_EKEYREVOKED",
+ 129: "SYS_EKEYREJECTED",
+ 130: "SYS_EOWNERDEAD",
+ 131: "SYS_ENOTRECOVERABLE",
+ 132: "SYS_ERFKILL",
+}
+var RemoteSocketServiceError_SystemError_value = map[string]int32{
+ "SYS_SUCCESS": 0,
+ "SYS_EPERM": 1,
+ "SYS_ENOENT": 2,
+ "SYS_ESRCH": 3,
+ "SYS_EINTR": 4,
+ "SYS_EIO": 5,
+ "SYS_ENXIO": 6,
+ "SYS_E2BIG": 7,
+ "SYS_ENOEXEC": 8,
+ "SYS_EBADF": 9,
+ "SYS_ECHILD": 10,
+ "SYS_EAGAIN": 11,
+ "SYS_EWOULDBLOCK": 11,
+ "SYS_ENOMEM": 12,
+ "SYS_EACCES": 13,
+ "SYS_EFAULT": 14,
+ "SYS_ENOTBLK": 15,
+ "SYS_EBUSY": 16,
+ "SYS_EEXIST": 17,
+ "SYS_EXDEV": 18,
+ "SYS_ENODEV": 19,
+ "SYS_ENOTDIR": 20,
+ "SYS_EISDIR": 21,
+ "SYS_EINVAL": 22,
+ "SYS_ENFILE": 23,
+ "SYS_EMFILE": 24,
+ "SYS_ENOTTY": 25,
+ "SYS_ETXTBSY": 26,
+ "SYS_EFBIG": 27,
+ "SYS_ENOSPC": 28,
+ "SYS_ESPIPE": 29,
+ "SYS_EROFS": 30,
+ "SYS_EMLINK": 31,
+ "SYS_EPIPE": 32,
+ "SYS_EDOM": 33,
+ "SYS_ERANGE": 34,
+ "SYS_EDEADLK": 35,
+ "SYS_EDEADLOCK": 35,
+ "SYS_ENAMETOOLONG": 36,
+ "SYS_ENOLCK": 37,
+ "SYS_ENOSYS": 38,
+ "SYS_ENOTEMPTY": 39,
+ "SYS_ELOOP": 40,
+ "SYS_ENOMSG": 42,
+ "SYS_EIDRM": 43,
+ "SYS_ECHRNG": 44,
+ "SYS_EL2NSYNC": 45,
+ "SYS_EL3HLT": 46,
+ "SYS_EL3RST": 47,
+ "SYS_ELNRNG": 48,
+ "SYS_EUNATCH": 49,
+ "SYS_ENOCSI": 50,
+ "SYS_EL2HLT": 51,
+ "SYS_EBADE": 52,
+ "SYS_EBADR": 53,
+ "SYS_EXFULL": 54,
+ "SYS_ENOANO": 55,
+ "SYS_EBADRQC": 56,
+ "SYS_EBADSLT": 57,
+ "SYS_EBFONT": 59,
+ "SYS_ENOSTR": 60,
+ "SYS_ENODATA": 61,
+ "SYS_ETIME": 62,
+ "SYS_ENOSR": 63,
+ "SYS_ENONET": 64,
+ "SYS_ENOPKG": 65,
+ "SYS_EREMOTE": 66,
+ "SYS_ENOLINK": 67,
+ "SYS_EADV": 68,
+ "SYS_ESRMNT": 69,
+ "SYS_ECOMM": 70,
+ "SYS_EPROTO": 71,
+ "SYS_EMULTIHOP": 72,
+ "SYS_EDOTDOT": 73,
+ "SYS_EBADMSG": 74,
+ "SYS_EOVERFLOW": 75,
+ "SYS_ENOTUNIQ": 76,
+ "SYS_EBADFD": 77,
+ "SYS_EREMCHG": 78,
+ "SYS_ELIBACC": 79,
+ "SYS_ELIBBAD": 80,
+ "SYS_ELIBSCN": 81,
+ "SYS_ELIBMAX": 82,
+ "SYS_ELIBEXEC": 83,
+ "SYS_EILSEQ": 84,
+ "SYS_ERESTART": 85,
+ "SYS_ESTRPIPE": 86,
+ "SYS_EUSERS": 87,
+ "SYS_ENOTSOCK": 88,
+ "SYS_EDESTADDRREQ": 89,
+ "SYS_EMSGSIZE": 90,
+ "SYS_EPROTOTYPE": 91,
+ "SYS_ENOPROTOOPT": 92,
+ "SYS_EPROTONOSUPPORT": 93,
+ "SYS_ESOCKTNOSUPPORT": 94,
+ "SYS_EOPNOTSUPP": 95,
+ "SYS_ENOTSUP": 95,
+ "SYS_EPFNOSUPPORT": 96,
+ "SYS_EAFNOSUPPORT": 97,
+ "SYS_EADDRINUSE": 98,
+ "SYS_EADDRNOTAVAIL": 99,
+ "SYS_ENETDOWN": 100,
+ "SYS_ENETUNREACH": 101,
+ "SYS_ENETRESET": 102,
+ "SYS_ECONNABORTED": 103,
+ "SYS_ECONNRESET": 104,
+ "SYS_ENOBUFS": 105,
+ "SYS_EISCONN": 106,
+ "SYS_ENOTCONN": 107,
+ "SYS_ESHUTDOWN": 108,
+ "SYS_ETOOMANYREFS": 109,
+ "SYS_ETIMEDOUT": 110,
+ "SYS_ECONNREFUSED": 111,
+ "SYS_EHOSTDOWN": 112,
+ "SYS_EHOSTUNREACH": 113,
+ "SYS_EALREADY": 114,
+ "SYS_EINPROGRESS": 115,
+ "SYS_ESTALE": 116,
+ "SYS_EUCLEAN": 117,
+ "SYS_ENOTNAM": 118,
+ "SYS_ENAVAIL": 119,
+ "SYS_EISNAM": 120,
+ "SYS_EREMOTEIO": 121,
+ "SYS_EDQUOT": 122,
+ "SYS_ENOMEDIUM": 123,
+ "SYS_EMEDIUMTYPE": 124,
+ "SYS_ECANCELED": 125,
+ "SYS_ENOKEY": 126,
+ "SYS_EKEYEXPIRED": 127,
+ "SYS_EKEYREVOKED": 128,
+ "SYS_EKEYREJECTED": 129,
+ "SYS_EOWNERDEAD": 130,
+ "SYS_ENOTRECOVERABLE": 131,
+ "SYS_ERFKILL": 132,
+}
+
+func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError {
+ p := new(RemoteSocketServiceError_SystemError)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_SystemError) String() string {
+ return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x))
+}
+func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_SystemError(value)
+ return nil
+}
+
+type CreateSocketRequest_SocketFamily int32
+
+const (
+ CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1
+ CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2
+)
+
+var CreateSocketRequest_SocketFamily_name = map[int32]string{
+ 1: "IPv4",
+ 2: "IPv6",
+}
+var CreateSocketRequest_SocketFamily_value = map[string]int32{
+ "IPv4": 1,
+ "IPv6": 2,
+}
+
+func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily {
+ p := new(CreateSocketRequest_SocketFamily)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketFamily) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketFamily(value)
+ return nil
+}
+
+type CreateSocketRequest_SocketProtocol int32
+
+const (
+ CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1
+ CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2
+)
+
+var CreateSocketRequest_SocketProtocol_name = map[int32]string{
+ 1: "TCP",
+ 2: "UDP",
+}
+var CreateSocketRequest_SocketProtocol_value = map[string]int32{
+ "TCP": 1,
+ "UDP": 2,
+}
+
+func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol {
+ p := new(CreateSocketRequest_SocketProtocol)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketProtocol) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketProtocol(value)
+ return nil
+}
+
+type SocketOption_SocketOptionLevel int32
+
+const (
+ SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0
+ SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1
+ SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6
+ SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17
+)
+
+var SocketOption_SocketOptionLevel_name = map[int32]string{
+ 0: "SOCKET_SOL_IP",
+ 1: "SOCKET_SOL_SOCKET",
+ 6: "SOCKET_SOL_TCP",
+ 17: "SOCKET_SOL_UDP",
+}
+var SocketOption_SocketOptionLevel_value = map[string]int32{
+ "SOCKET_SOL_IP": 0,
+ "SOCKET_SOL_SOCKET": 1,
+ "SOCKET_SOL_TCP": 6,
+ "SOCKET_SOL_UDP": 17,
+}
+
+func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel {
+ p := new(SocketOption_SocketOptionLevel)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionLevel) String() string {
+ return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x))
+}
+func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionLevel(value)
+ return nil
+}
+
+type SocketOption_SocketOptionName int32
+
+const (
+ SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13
+ SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20
+ SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21
+ SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11
+ SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12
+)
+
+var SocketOption_SocketOptionName_name = map[int32]string{
+ 1: "SOCKET_SO_DEBUG",
+ 2: "SOCKET_SO_REUSEADDR",
+ 3: "SOCKET_SO_TYPE",
+ 4: "SOCKET_SO_ERROR",
+ 5: "SOCKET_SO_DONTROUTE",
+ 6: "SOCKET_SO_BROADCAST",
+ 7: "SOCKET_SO_SNDBUF",
+ 8: "SOCKET_SO_RCVBUF",
+ 9: "SOCKET_SO_KEEPALIVE",
+ 10: "SOCKET_SO_OOBINLINE",
+ 13: "SOCKET_SO_LINGER",
+ 20: "SOCKET_SO_RCVTIMEO",
+ 21: "SOCKET_SO_SNDTIMEO",
+ // Duplicate value: 1: "SOCKET_IP_TOS",
+ // Duplicate value: 2: "SOCKET_IP_TTL",
+ // Duplicate value: 3: "SOCKET_IP_HDRINCL",
+ // Duplicate value: 4: "SOCKET_IP_OPTIONS",
+ // Duplicate value: 1: "SOCKET_TCP_NODELAY",
+ // Duplicate value: 2: "SOCKET_TCP_MAXSEG",
+ // Duplicate value: 3: "SOCKET_TCP_CORK",
+ // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE",
+ // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL",
+ // Duplicate value: 6: "SOCKET_TCP_KEEPCNT",
+ // Duplicate value: 7: "SOCKET_TCP_SYNCNT",
+ // Duplicate value: 8: "SOCKET_TCP_LINGER2",
+ // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT",
+ // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP",
+ 11: "SOCKET_TCP_INFO",
+ 12: "SOCKET_TCP_QUICKACK",
+}
+var SocketOption_SocketOptionName_value = map[string]int32{
+ "SOCKET_SO_DEBUG": 1,
+ "SOCKET_SO_REUSEADDR": 2,
+ "SOCKET_SO_TYPE": 3,
+ "SOCKET_SO_ERROR": 4,
+ "SOCKET_SO_DONTROUTE": 5,
+ "SOCKET_SO_BROADCAST": 6,
+ "SOCKET_SO_SNDBUF": 7,
+ "SOCKET_SO_RCVBUF": 8,
+ "SOCKET_SO_KEEPALIVE": 9,
+ "SOCKET_SO_OOBINLINE": 10,
+ "SOCKET_SO_LINGER": 13,
+ "SOCKET_SO_RCVTIMEO": 20,
+ "SOCKET_SO_SNDTIMEO": 21,
+ "SOCKET_IP_TOS": 1,
+ "SOCKET_IP_TTL": 2,
+ "SOCKET_IP_HDRINCL": 3,
+ "SOCKET_IP_OPTIONS": 4,
+ "SOCKET_TCP_NODELAY": 1,
+ "SOCKET_TCP_MAXSEG": 2,
+ "SOCKET_TCP_CORK": 3,
+ "SOCKET_TCP_KEEPIDLE": 4,
+ "SOCKET_TCP_KEEPINTVL": 5,
+ "SOCKET_TCP_KEEPCNT": 6,
+ "SOCKET_TCP_SYNCNT": 7,
+ "SOCKET_TCP_LINGER2": 8,
+ "SOCKET_TCP_DEFER_ACCEPT": 9,
+ "SOCKET_TCP_WINDOW_CLAMP": 10,
+ "SOCKET_TCP_INFO": 11,
+ "SOCKET_TCP_QUICKACK": 12,
+}
+
+func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName {
+ p := new(SocketOption_SocketOptionName)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionName) String() string {
+ return proto.EnumName(SocketOption_SocketOptionName_name, int32(x))
+}
+func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionName(value)
+ return nil
+}
+
+type ShutDownRequest_How int32
+
+const (
+ ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1
+ ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2
+ ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3
+)
+
+var ShutDownRequest_How_name = map[int32]string{
+ 1: "SOCKET_SHUT_RD",
+ 2: "SOCKET_SHUT_WR",
+ 3: "SOCKET_SHUT_RDWR",
+}
+var ShutDownRequest_How_value = map[string]int32{
+ "SOCKET_SHUT_RD": 1,
+ "SOCKET_SHUT_WR": 2,
+ "SOCKET_SHUT_RDWR": 3,
+}
+
+func (x ShutDownRequest_How) Enum() *ShutDownRequest_How {
+ p := new(ShutDownRequest_How)
+ *p = x
+ return p
+}
+func (x ShutDownRequest_How) String() string {
+ return proto.EnumName(ShutDownRequest_How_name, int32(x))
+}
+func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How")
+ if err != nil {
+ return err
+ }
+ *x = ShutDownRequest_How(value)
+ return nil
+}
+
+type ReceiveRequest_Flags int32
+
+const (
+ ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1
+ ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2
+)
+
+var ReceiveRequest_Flags_name = map[int32]string{
+ 1: "MSG_OOB",
+ 2: "MSG_PEEK",
+}
+var ReceiveRequest_Flags_value = map[string]int32{
+ "MSG_OOB": 1,
+ "MSG_PEEK": 2,
+}
+
+func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags {
+ p := new(ReceiveRequest_Flags)
+ *p = x
+ return p
+}
+func (x ReceiveRequest_Flags) String() string {
+ return proto.EnumName(ReceiveRequest_Flags_name, int32(x))
+}
+func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags")
+ if err != nil {
+ return err
+ }
+ *x = ReceiveRequest_Flags(value)
+ return nil
+}
+
+type PollEvent_PollEventFlag int32
+
+const (
+ PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0
+ PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1
+ PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2
+ PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4
+ PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8
+ PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16
+ PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32
+ PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64
+ PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128
+ PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256
+ PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512
+ PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024
+ PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096
+ PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192
+)
+
+var PollEvent_PollEventFlag_name = map[int32]string{
+ 0: "SOCKET_POLLNONE",
+ 1: "SOCKET_POLLIN",
+ 2: "SOCKET_POLLPRI",
+ 4: "SOCKET_POLLOUT",
+ 8: "SOCKET_POLLERR",
+ 16: "SOCKET_POLLHUP",
+ 32: "SOCKET_POLLNVAL",
+ 64: "SOCKET_POLLRDNORM",
+ 128: "SOCKET_POLLRDBAND",
+ 256: "SOCKET_POLLWRNORM",
+ 512: "SOCKET_POLLWRBAND",
+ 1024: "SOCKET_POLLMSG",
+ 4096: "SOCKET_POLLREMOVE",
+ 8192: "SOCKET_POLLRDHUP",
+}
+var PollEvent_PollEventFlag_value = map[string]int32{
+ "SOCKET_POLLNONE": 0,
+ "SOCKET_POLLIN": 1,
+ "SOCKET_POLLPRI": 2,
+ "SOCKET_POLLOUT": 4,
+ "SOCKET_POLLERR": 8,
+ "SOCKET_POLLHUP": 16,
+ "SOCKET_POLLNVAL": 32,
+ "SOCKET_POLLRDNORM": 64,
+ "SOCKET_POLLRDBAND": 128,
+ "SOCKET_POLLWRNORM": 256,
+ "SOCKET_POLLWRBAND": 512,
+ "SOCKET_POLLMSG": 1024,
+ "SOCKET_POLLREMOVE": 4096,
+ "SOCKET_POLLRDHUP": 8192,
+}
+
+func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag {
+ p := new(PollEvent_PollEventFlag)
+ *p = x
+ return p
+}
+func (x PollEvent_PollEventFlag) String() string {
+ return proto.EnumName(PollEvent_PollEventFlag_name, int32(x))
+}
+func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag")
+ if err != nil {
+ return err
+ }
+ *x = PollEvent_PollEventFlag(value)
+ return nil
+}
+
+type ResolveReply_ErrorCode int32
+
+const (
+ ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1
+ ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2
+ ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3
+ ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4
+ ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5
+ ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6
+ ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7
+ ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8
+ ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9
+ ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10
+ ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11
+ ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12
+ ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13
+ ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14
+ ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15
+)
+
+var ResolveReply_ErrorCode_name = map[int32]string{
+ 1: "SOCKET_EAI_ADDRFAMILY",
+ 2: "SOCKET_EAI_AGAIN",
+ 3: "SOCKET_EAI_BADFLAGS",
+ 4: "SOCKET_EAI_FAIL",
+ 5: "SOCKET_EAI_FAMILY",
+ 6: "SOCKET_EAI_MEMORY",
+ 7: "SOCKET_EAI_NODATA",
+ 8: "SOCKET_EAI_NONAME",
+ 9: "SOCKET_EAI_SERVICE",
+ 10: "SOCKET_EAI_SOCKTYPE",
+ 11: "SOCKET_EAI_SYSTEM",
+ 12: "SOCKET_EAI_BADHINTS",
+ 13: "SOCKET_EAI_PROTOCOL",
+ 14: "SOCKET_EAI_OVERFLOW",
+ 15: "SOCKET_EAI_MAX",
+}
+var ResolveReply_ErrorCode_value = map[string]int32{
+ "SOCKET_EAI_ADDRFAMILY": 1,
+ "SOCKET_EAI_AGAIN": 2,
+ "SOCKET_EAI_BADFLAGS": 3,
+ "SOCKET_EAI_FAIL": 4,
+ "SOCKET_EAI_FAMILY": 5,
+ "SOCKET_EAI_MEMORY": 6,
+ "SOCKET_EAI_NODATA": 7,
+ "SOCKET_EAI_NONAME": 8,
+ "SOCKET_EAI_SERVICE": 9,
+ "SOCKET_EAI_SOCKTYPE": 10,
+ "SOCKET_EAI_SYSTEM": 11,
+ "SOCKET_EAI_BADHINTS": 12,
+ "SOCKET_EAI_PROTOCOL": 13,
+ "SOCKET_EAI_OVERFLOW": 14,
+ "SOCKET_EAI_MAX": 15,
+}
+
+func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode {
+ p := new(ResolveReply_ErrorCode)
+ *p = x
+ return p
+}
+func (x ResolveReply_ErrorCode) String() string {
+ return proto.EnumName(ResolveReply_ErrorCode_name, int32(x))
+}
+func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ResolveReply_ErrorCode(value)
+ return nil
+}
+
+type RemoteSocketServiceError struct {
+ SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} }
+func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) }
+func (*RemoteSocketServiceError) ProtoMessage() {}
+
+const Default_RemoteSocketServiceError_SystemError int32 = 0
+
+func (m *RemoteSocketServiceError) GetSystemError() int32 {
+ if m != nil && m.SystemError != nil {
+ return *m.SystemError
+ }
+ return Default_RemoteSocketServiceError_SystemError
+}
+
+func (m *RemoteSocketServiceError) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+type AddressPort struct {
+ Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"`
+ PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"`
+ HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddressPort) Reset() { *m = AddressPort{} }
+func (m *AddressPort) String() string { return proto.CompactTextString(m) }
+func (*AddressPort) ProtoMessage() {}
+
+func (m *AddressPort) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *AddressPort) GetPackedAddress() []byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *AddressPort) GetHostnameHint() string {
+ if m != nil && m.HostnameHint != nil {
+ return *m.HostnameHint
+ }
+ return ""
+}
+
+type CreateSocketRequest struct {
+ Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"`
+ Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"`
+ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"`
+ AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"`
+ ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} }
+func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketRequest) ProtoMessage() {}
+
+const Default_CreateSocketRequest_ListenBacklog int32 = 0
+
+func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily {
+ if m != nil && m.Family != nil {
+ return *m.Family
+ }
+ return CreateSocketRequest_IPv4
+}
+
+func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol {
+ if m != nil && m.Protocol != nil {
+ return *m.Protocol
+ }
+ return CreateSocketRequest_TCP
+}
+
+func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption {
+ if m != nil {
+ return m.SocketOptions
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetListenBacklog() int32 {
+ if m != nil && m.ListenBacklog != nil {
+ return *m.ListenBacklog
+ }
+ return Default_CreateSocketRequest_ListenBacklog
+}
+
+func (m *CreateSocketRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CreateSocketRequest) GetProjectId() int64 {
+ if m != nil && m.ProjectId != nil {
+ return *m.ProjectId
+ }
+ return 0
+}
+
+type CreateSocketReply struct {
+ SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} }
+func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketReply) ProtoMessage() {}
+
+var extRange_CreateSocketReply = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_CreateSocketReply
+}
+func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *CreateSocketReply) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CreateSocketReply) GetServerAddress() *AddressPort {
+ if m != nil {
+ return m.ServerAddress
+ }
+ return nil
+}
+
+func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BindRequest) Reset() { *m = BindRequest{} }
+func (m *BindRequest) String() string { return proto.CompactTextString(m) }
+func (*BindRequest) ProtoMessage() {}
+
+func (m *BindRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *BindRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BindReply) Reset() { *m = BindReply{} }
+func (m *BindReply) String() string { return proto.CompactTextString(m) }
+func (*BindReply) ProtoMessage() {}
+
+func (m *BindReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetSocketNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} }
+func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameRequest) ProtoMessage() {}
+
+func (m *GetSocketNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetSocketNameReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} }
+func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameReply) ProtoMessage() {}
+
+func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetPeerNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} }
+func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameRequest) ProtoMessage() {}
+
+func (m *GetPeerNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetPeerNameReply struct {
+ PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} }
+func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameReply) ProtoMessage() {}
+
+func (m *GetPeerNameReply) GetPeerIp() *AddressPort {
+ if m != nil {
+ return m.PeerIp
+ }
+ return nil
+}
+
+type SocketOption struct {
+ Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"`
+ Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SocketOption) Reset() { *m = SocketOption{} }
+func (m *SocketOption) String() string { return proto.CompactTextString(m) }
+func (*SocketOption) ProtoMessage() {}
+
+func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return SocketOption_SOCKET_SOL_IP
+}
+
+func (m *SocketOption) GetOption() SocketOption_SocketOptionName {
+ if m != nil && m.Option != nil {
+ return *m.Option
+ }
+ return SocketOption_SOCKET_SO_DEBUG
+}
+
+func (m *SocketOption) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} }
+func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsRequest) ProtoMessage() {}
+
+func (m *SetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type SetSocketOptionsReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} }
+func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsReply) ProtoMessage() {}
+
+type GetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} }
+func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsRequest) ProtoMessage() {}
+
+func (m *GetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type GetSocketOptionsReply struct {
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} }
+func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsReply) ProtoMessage() {}
+
+func (m *GetSocketOptionsReply) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type ConnectRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConnectRequest) Reset() { *m = ConnectRequest{} }
+func (m *ConnectRequest) String() string { return proto.CompactTextString(m) }
+func (*ConnectRequest) ProtoMessage() {}
+
+const Default_ConnectRequest_TimeoutSeconds float64 = -1
+
+func (m *ConnectRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ConnectRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *ConnectRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ConnectRequest_TimeoutSeconds
+}
+
+type ConnectReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConnectReply) Reset() { *m = ConnectReply{} }
+func (m *ConnectReply) String() string { return proto.CompactTextString(m) }
+func (*ConnectReply) ProtoMessage() {}
+
+var extRange_ConnectReply = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ConnectReply
+}
+func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *ConnectReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type ListenRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListenRequest) Reset() { *m = ListenRequest{} }
+func (m *ListenRequest) String() string { return proto.CompactTextString(m) }
+func (*ListenRequest) ProtoMessage() {}
+
+func (m *ListenRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ListenRequest) GetBacklog() int32 {
+ if m != nil && m.Backlog != nil {
+ return *m.Backlog
+ }
+ return 0
+}
+
+type ListenReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListenReply) Reset() { *m = ListenReply{} }
+func (m *ListenReply) String() string { return proto.CompactTextString(m) }
+func (*ListenReply) ProtoMessage() {}
+
+type AcceptRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AcceptRequest) Reset() { *m = AcceptRequest{} }
+func (m *AcceptRequest) String() string { return proto.CompactTextString(m) }
+func (*AcceptRequest) ProtoMessage() {}
+
+const Default_AcceptRequest_TimeoutSeconds float64 = -1
+
+func (m *AcceptRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *AcceptRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_AcceptRequest_TimeoutSeconds
+}
+
+type AcceptReply struct {
+ NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"`
+ RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AcceptReply) Reset() { *m = AcceptReply{} }
+func (m *AcceptReply) String() string { return proto.CompactTextString(m) }
+func (*AcceptReply) ProtoMessage() {}
+
+func (m *AcceptReply) GetNewSocketDescriptor() []byte {
+ if m != nil {
+ return m.NewSocketDescriptor
+ }
+ return nil
+}
+
+func (m *AcceptReply) GetRemoteAddress() *AddressPort {
+ if m != nil {
+ return m.RemoteAddress
+ }
+ return nil
+}
+
+type ShutDownRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"`
+ SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} }
+func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) }
+func (*ShutDownRequest) ProtoMessage() {}
+
+func (m *ShutDownRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ShutDownRequest) GetHow() ShutDownRequest_How {
+ if m != nil && m.How != nil {
+ return *m.How
+ }
+ return ShutDownRequest_SOCKET_SHUT_RD
+}
+
+func (m *ShutDownRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return 0
+}
+
+type ShutDownReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ShutDownReply) Reset() { *m = ShutDownReply{} }
+func (m *ShutDownReply) String() string { return proto.CompactTextString(m) }
+func (*ShutDownReply) ProtoMessage() {}
+
+type CloseRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloseRequest) Reset() { *m = CloseRequest{} }
+func (m *CloseRequest) String() string { return proto.CompactTextString(m) }
+func (*CloseRequest) ProtoMessage() {}
+
+const Default_CloseRequest_SendOffset int64 = -1
+
+func (m *CloseRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CloseRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return Default_CloseRequest_SendOffset
+}
+
+type CloseReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloseReply) Reset() { *m = CloseReply{} }
+func (m *CloseReply) String() string { return proto.CompactTextString(m) }
+func (*CloseReply) ProtoMessage() {}
+
+type SendRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"`
+ StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"`
+ Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"`
+ SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendRequest) Reset() { *m = SendRequest{} }
+func (m *SendRequest) String() string { return proto.CompactTextString(m) }
+func (*SendRequest) ProtoMessage() {}
+
+const Default_SendRequest_Flags int32 = 0
+const Default_SendRequest_TimeoutSeconds float64 = -1
+
+func (m *SendRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SendRequest) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *SendRequest) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *SendRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_SendRequest_Flags
+}
+
+func (m *SendRequest) GetSendTo() *AddressPort {
+ if m != nil {
+ return m.SendTo
+ }
+ return nil
+}
+
+func (m *SendRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_SendRequest_TimeoutSeconds
+}
+
+type SendReply struct {
+ DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendReply) Reset() { *m = SendReply{} }
+func (m *SendReply) String() string { return proto.CompactTextString(m) }
+func (*SendReply) ProtoMessage() {}
+
+func (m *SendReply) GetDataSent() int32 {
+ if m != nil && m.DataSent != nil {
+ return *m.DataSent
+ }
+ return 0
+}
+
+type ReceiveRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"`
+ Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} }
+func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) }
+func (*ReceiveRequest) ProtoMessage() {}
+
+const Default_ReceiveRequest_Flags int32 = 0
+const Default_ReceiveRequest_TimeoutSeconds float64 = -1
+
+func (m *ReceiveRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ReceiveRequest) GetDataSize() int32 {
+ if m != nil && m.DataSize != nil {
+ return *m.DataSize
+ }
+ return 0
+}
+
+func (m *ReceiveRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_ReceiveRequest_Flags
+}
+
+func (m *ReceiveRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ReceiveRequest_TimeoutSeconds
+}
+
+type ReceiveReply struct {
+ StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"`
+ BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReceiveReply) Reset() { *m = ReceiveReply{} }
+func (m *ReceiveReply) String() string { return proto.CompactTextString(m) }
+func (*ReceiveReply) ProtoMessage() {}
+
+func (m *ReceiveReply) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *ReceiveReply) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetReceivedFrom() *AddressPort {
+ if m != nil {
+ return m.ReceivedFrom
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetBufferSize() int32 {
+ if m != nil && m.BufferSize != nil {
+ return *m.BufferSize
+ }
+ return 0
+}
+
+type PollEvent struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"`
+ ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollEvent) Reset() { *m = PollEvent{} }
+func (m *PollEvent) String() string { return proto.CompactTextString(m) }
+func (*PollEvent) ProtoMessage() {}
+
+func (m *PollEvent) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *PollEvent) GetRequestedEvents() int32 {
+ if m != nil && m.RequestedEvents != nil {
+ return *m.RequestedEvents
+ }
+ return 0
+}
+
+func (m *PollEvent) GetObservedEvents() int32 {
+ if m != nil && m.ObservedEvents != nil {
+ return *m.ObservedEvents
+ }
+ return 0
+}
+
+type PollRequest struct {
+ Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollRequest) Reset() { *m = PollRequest{} }
+func (m *PollRequest) String() string { return proto.CompactTextString(m) }
+func (*PollRequest) ProtoMessage() {}
+
+const Default_PollRequest_TimeoutSeconds float64 = -1
+
+func (m *PollRequest) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+func (m *PollRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_PollRequest_TimeoutSeconds
+}
+
+type PollReply struct {
+ Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollReply) Reset() { *m = PollReply{} }
+func (m *PollReply) String() string { return proto.CompactTextString(m) }
+func (*PollReply) ProtoMessage() {}
+
+func (m *PollReply) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+type ResolveRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
+func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
+func (*ResolveRequest) ProtoMessage() {}
+
+func (m *ResolveRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily {
+ if m != nil {
+ return m.AddressFamilies
+ }
+ return nil
+}
+
+type ResolveReply struct {
+ PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"`
+ CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"`
+ Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ResolveReply) Reset() { *m = ResolveReply{} }
+func (m *ResolveReply) String() string { return proto.CompactTextString(m) }
+func (*ResolveReply) ProtoMessage() {}
+
+func (m *ResolveReply) GetPackedAddress() [][]byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *ResolveReply) GetCanonicalName() string {
+ if m != nil && m.CanonicalName != nil {
+ return *m.CanonicalName
+ }
+ return ""
+}
+
+func (m *ResolveReply) GetAliases() []string {
+ if m != nil {
+ return m.Aliases
+ }
+ return nil
+}
+
+func init() {
+}
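
For readers skimming this generated file, here is a brief, illustrative usage sketch; it is not part of the vendored diff. It assumes code compiled in the same generated socket package, with proto referring to the github.com/golang/protobuf/proto package imported at the top of the file, and it simply exercises the Enum() helpers and pointer-field accessors defined above. The port and hostname values are placeholders.

func exampleCreateSocketRequest() ([]byte, error) {
	// Build a request using the generated Enum() helpers and the proto
	// pointer constructors; in proto2-generated Go, scalar fields are pointers.
	req := &CreateSocketRequest{
		Family:        CreateSocketRequest_IPv4.Enum(),
		Protocol:      CreateSocketRequest_TCP.Enum(),
		ListenBacklog: proto.Int32(0),
		RemoteIp: &AddressPort{
			Port:         proto.Int32(443),
			HostnameHint: proto.String("example.com"),
		},
	}
	// Unset optional fields are still safe to read through their Get*
	// accessors, e.g. req.GetAppId() returns "" and req.GetProjectId() returns 0.
	return proto.Marshal(req)
}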
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
new file mode 100644
index 000000000..2fcc7953d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
@@ -0,0 +1,460 @@
+syntax = "proto2";
+option go_package = "socket";
+
+package appengine;
+
+message RemoteSocketServiceError {
+ enum ErrorCode {
+ SYSTEM_ERROR = 1;
+ GAI_ERROR = 2;
+ FAILURE = 4;
+ PERMISSION_DENIED = 5;
+ INVALID_REQUEST = 6;
+ SOCKET_CLOSED = 7;
+ }
+
+ enum SystemError {
+ option allow_alias = true;
+
+ SYS_SUCCESS = 0;
+ SYS_EPERM = 1;
+ SYS_ENOENT = 2;
+ SYS_ESRCH = 3;
+ SYS_EINTR = 4;
+ SYS_EIO = 5;
+ SYS_ENXIO = 6;
+ SYS_E2BIG = 7;
+ SYS_ENOEXEC = 8;
+ SYS_EBADF = 9;
+ SYS_ECHILD = 10;
+ SYS_EAGAIN = 11;
+ SYS_EWOULDBLOCK = 11;
+ SYS_ENOMEM = 12;
+ SYS_EACCES = 13;
+ SYS_EFAULT = 14;
+ SYS_ENOTBLK = 15;
+ SYS_EBUSY = 16;
+ SYS_EEXIST = 17;
+ SYS_EXDEV = 18;
+ SYS_ENODEV = 19;
+ SYS_ENOTDIR = 20;
+ SYS_EISDIR = 21;
+ SYS_EINVAL = 22;
+ SYS_ENFILE = 23;
+ SYS_EMFILE = 24;
+ SYS_ENOTTY = 25;
+ SYS_ETXTBSY = 26;
+ SYS_EFBIG = 27;
+ SYS_ENOSPC = 28;
+ SYS_ESPIPE = 29;
+ SYS_EROFS = 30;
+ SYS_EMLINK = 31;
+ SYS_EPIPE = 32;
+ SYS_EDOM = 33;
+ SYS_ERANGE = 34;
+ SYS_EDEADLK = 35;
+ SYS_EDEADLOCK = 35;
+ SYS_ENAMETOOLONG = 36;
+ SYS_ENOLCK = 37;
+ SYS_ENOSYS = 38;
+ SYS_ENOTEMPTY = 39;
+ SYS_ELOOP = 40;
+ SYS_ENOMSG = 42;
+ SYS_EIDRM = 43;
+ SYS_ECHRNG = 44;
+ SYS_EL2NSYNC = 45;
+ SYS_EL3HLT = 46;
+ SYS_EL3RST = 47;
+ SYS_ELNRNG = 48;
+ SYS_EUNATCH = 49;
+ SYS_ENOCSI = 50;
+ SYS_EL2HLT = 51;
+ SYS_EBADE = 52;
+ SYS_EBADR = 53;
+ SYS_EXFULL = 54;
+ SYS_ENOANO = 55;
+ SYS_EBADRQC = 56;
+ SYS_EBADSLT = 57;
+ SYS_EBFONT = 59;
+ SYS_ENOSTR = 60;
+ SYS_ENODATA = 61;
+ SYS_ETIME = 62;
+ SYS_ENOSR = 63;
+ SYS_ENONET = 64;
+ SYS_ENOPKG = 65;
+ SYS_EREMOTE = 66;
+ SYS_ENOLINK = 67;
+ SYS_EADV = 68;
+ SYS_ESRMNT = 69;
+ SYS_ECOMM = 70;
+ SYS_EPROTO = 71;
+ SYS_EMULTIHOP = 72;
+ SYS_EDOTDOT = 73;
+ SYS_EBADMSG = 74;
+ SYS_EOVERFLOW = 75;
+ SYS_ENOTUNIQ = 76;
+ SYS_EBADFD = 77;
+ SYS_EREMCHG = 78;
+ SYS_ELIBACC = 79;
+ SYS_ELIBBAD = 80;
+ SYS_ELIBSCN = 81;
+ SYS_ELIBMAX = 82;
+ SYS_ELIBEXEC = 83;
+ SYS_EILSEQ = 84;
+ SYS_ERESTART = 85;
+ SYS_ESTRPIPE = 86;
+ SYS_EUSERS = 87;
+ SYS_ENOTSOCK = 88;
+ SYS_EDESTADDRREQ = 89;
+ SYS_EMSGSIZE = 90;
+ SYS_EPROTOTYPE = 91;
+ SYS_ENOPROTOOPT = 92;
+ SYS_EPROTONOSUPPORT = 93;
+ SYS_ESOCKTNOSUPPORT = 94;
+ SYS_EOPNOTSUPP = 95;
+ SYS_ENOTSUP = 95;
+ SYS_EPFNOSUPPORT = 96;
+ SYS_EAFNOSUPPORT = 97;
+ SYS_EADDRINUSE = 98;
+ SYS_EADDRNOTAVAIL = 99;
+ SYS_ENETDOWN = 100;
+ SYS_ENETUNREACH = 101;
+ SYS_ENETRESET = 102;
+ SYS_ECONNABORTED = 103;
+ SYS_ECONNRESET = 104;
+ SYS_ENOBUFS = 105;
+ SYS_EISCONN = 106;
+ SYS_ENOTCONN = 107;
+ SYS_ESHUTDOWN = 108;
+ SYS_ETOOMANYREFS = 109;
+ SYS_ETIMEDOUT = 110;
+ SYS_ECONNREFUSED = 111;
+ SYS_EHOSTDOWN = 112;
+ SYS_EHOSTUNREACH = 113;
+ SYS_EALREADY = 114;
+ SYS_EINPROGRESS = 115;
+ SYS_ESTALE = 116;
+ SYS_EUCLEAN = 117;
+ SYS_ENOTNAM = 118;
+ SYS_ENAVAIL = 119;
+ SYS_EISNAM = 120;
+ SYS_EREMOTEIO = 121;
+ SYS_EDQUOT = 122;
+ SYS_ENOMEDIUM = 123;
+ SYS_EMEDIUMTYPE = 124;
+ SYS_ECANCELED = 125;
+ SYS_ENOKEY = 126;
+ SYS_EKEYEXPIRED = 127;
+ SYS_EKEYREVOKED = 128;
+ SYS_EKEYREJECTED = 129;
+ SYS_EOWNERDEAD = 130;
+ SYS_ENOTRECOVERABLE = 131;
+ SYS_ERFKILL = 132;
+ }
+
+ optional int32 system_error = 1 [default=0];
+ optional string error_detail = 2;
+}
+
+message AddressPort {
+ required int32 port = 1;
+ optional bytes packed_address = 2;
+
+ optional string hostname_hint = 3;
+}
+
+
+
+message CreateSocketRequest {
+ enum SocketFamily {
+ IPv4 = 1;
+ IPv6 = 2;
+ }
+
+ enum SocketProtocol {
+ TCP = 1;
+ UDP = 2;
+ }
+
+ required SocketFamily family = 1;
+ required SocketProtocol protocol = 2;
+
+ repeated SocketOption socket_options = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ optional int32 listen_backlog = 5 [default=0];
+
+ optional AddressPort remote_ip = 6;
+
+ optional string app_id = 9;
+
+ optional int64 project_id = 10;
+}
+
+message CreateSocketReply {
+ optional string socket_descriptor = 1;
+
+ optional AddressPort server_address = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ extensions 1000 to max;
+}
+
+
+
+message BindRequest {
+ required string socket_descriptor = 1;
+ required AddressPort proxy_external_ip = 2;
+}
+
+message BindReply {
+ optional AddressPort proxy_external_ip = 1;
+}
+
+
+
+message GetSocketNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetSocketNameReply {
+ optional AddressPort proxy_external_ip = 2;
+}
+
+
+
+message GetPeerNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetPeerNameReply {
+ optional AddressPort peer_ip = 2;
+}
+
+
+message SocketOption {
+
+ enum SocketOptionLevel {
+ SOCKET_SOL_IP = 0;
+ SOCKET_SOL_SOCKET = 1;
+ SOCKET_SOL_TCP = 6;
+ SOCKET_SOL_UDP = 17;
+ }
+
+ enum SocketOptionName {
+ option allow_alias = true;
+
+ SOCKET_SO_DEBUG = 1;
+ SOCKET_SO_REUSEADDR = 2;
+ SOCKET_SO_TYPE = 3;
+ SOCKET_SO_ERROR = 4;
+ SOCKET_SO_DONTROUTE = 5;
+ SOCKET_SO_BROADCAST = 6;
+ SOCKET_SO_SNDBUF = 7;
+ SOCKET_SO_RCVBUF = 8;
+ SOCKET_SO_KEEPALIVE = 9;
+ SOCKET_SO_OOBINLINE = 10;
+ SOCKET_SO_LINGER = 13;
+ SOCKET_SO_RCVTIMEO = 20;
+ SOCKET_SO_SNDTIMEO = 21;
+
+ SOCKET_IP_TOS = 1;
+ SOCKET_IP_TTL = 2;
+ SOCKET_IP_HDRINCL = 3;
+ SOCKET_IP_OPTIONS = 4;
+
+ SOCKET_TCP_NODELAY = 1;
+ SOCKET_TCP_MAXSEG = 2;
+ SOCKET_TCP_CORK = 3;
+ SOCKET_TCP_KEEPIDLE = 4;
+ SOCKET_TCP_KEEPINTVL = 5;
+ SOCKET_TCP_KEEPCNT = 6;
+ SOCKET_TCP_SYNCNT = 7;
+ SOCKET_TCP_LINGER2 = 8;
+ SOCKET_TCP_DEFER_ACCEPT = 9;
+ SOCKET_TCP_WINDOW_CLAMP = 10;
+ SOCKET_TCP_INFO = 11;
+ SOCKET_TCP_QUICKACK = 12;
+ }
+
+ required SocketOptionLevel level = 1;
+ required SocketOptionName option = 2;
+ required bytes value = 3;
+}
+
+
+message SetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message SetSocketOptionsReply {
+}
+
+message GetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message GetSocketOptionsReply {
+ repeated SocketOption options = 2;
+}
+
+
+message ConnectRequest {
+ required string socket_descriptor = 1;
+ required AddressPort remote_ip = 2;
+ optional double timeout_seconds = 3 [default=-1];
+}
+
+message ConnectReply {
+ optional AddressPort proxy_external_ip = 1;
+
+ extensions 1000 to max;
+}
+
+
+message ListenRequest {
+ required string socket_descriptor = 1;
+ required int32 backlog = 2;
+}
+
+message ListenReply {
+}
+
+
+message AcceptRequest {
+ required string socket_descriptor = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message AcceptReply {
+ optional bytes new_socket_descriptor = 2;
+ optional AddressPort remote_address = 3;
+}
+
+
+
+message ShutDownRequest {
+ enum How {
+ SOCKET_SHUT_RD = 1;
+ SOCKET_SHUT_WR = 2;
+ SOCKET_SHUT_RDWR = 3;
+ }
+ required string socket_descriptor = 1;
+ required How how = 2;
+ required int64 send_offset = 3;
+}
+
+message ShutDownReply {
+}
+
+
+
+message CloseRequest {
+ required string socket_descriptor = 1;
+ optional int64 send_offset = 2 [default=-1];
+}
+
+message CloseReply {
+}
+
+
+
+message SendRequest {
+ required string socket_descriptor = 1;
+ required bytes data = 2 [ctype=CORD];
+ required int64 stream_offset = 3;
+ optional int32 flags = 4 [default=0];
+ optional AddressPort send_to = 5;
+ optional double timeout_seconds = 6 [default=-1];
+}
+
+message SendReply {
+ optional int32 data_sent = 1;
+}
+
+
+message ReceiveRequest {
+ enum Flags {
+ MSG_OOB = 1;
+ MSG_PEEK = 2;
+ }
+ required string socket_descriptor = 1;
+ required int32 data_size = 2;
+ optional int32 flags = 3 [default=0];
+ optional double timeout_seconds = 5 [default=-1];
+}
+
+message ReceiveReply {
+ optional int64 stream_offset = 2;
+ optional bytes data = 3 [ctype=CORD];
+ optional AddressPort received_from = 4;
+ optional int32 buffer_size = 5;
+}
+
+
+
+message PollEvent {
+
+ enum PollEventFlag {
+ SOCKET_POLLNONE = 0;
+ SOCKET_POLLIN = 1;
+ SOCKET_POLLPRI = 2;
+ SOCKET_POLLOUT = 4;
+ SOCKET_POLLERR = 8;
+ SOCKET_POLLHUP = 16;
+ SOCKET_POLLNVAL = 32;
+ SOCKET_POLLRDNORM = 64;
+ SOCKET_POLLRDBAND = 128;
+ SOCKET_POLLWRNORM = 256;
+ SOCKET_POLLWRBAND = 512;
+ SOCKET_POLLMSG = 1024;
+ SOCKET_POLLREMOVE = 4096;
+ SOCKET_POLLRDHUP = 8192;
+ };
+
+ required string socket_descriptor = 1;
+ required int32 requested_events = 2;
+ required int32 observed_events = 3;
+}
+
+message PollRequest {
+ repeated PollEvent events = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message PollReply {
+ repeated PollEvent events = 2;
+}
+
+message ResolveRequest {
+ required string name = 1;
+ repeated CreateSocketRequest.SocketFamily address_families = 2;
+}
+
+message ResolveReply {
+ enum ErrorCode {
+ SOCKET_EAI_ADDRFAMILY = 1;
+ SOCKET_EAI_AGAIN = 2;
+ SOCKET_EAI_BADFLAGS = 3;
+ SOCKET_EAI_FAIL = 4;
+ SOCKET_EAI_FAMILY = 5;
+ SOCKET_EAI_MEMORY = 6;
+ SOCKET_EAI_NODATA = 7;
+ SOCKET_EAI_NONAME = 8;
+ SOCKET_EAI_SERVICE = 9;
+ SOCKET_EAI_SOCKTYPE = 10;
+ SOCKET_EAI_SYSTEM = 11;
+ SOCKET_EAI_BADHINTS = 12;
+ SOCKET_EAI_PROTOCOL = 13;
+ SOCKET_EAI_OVERFLOW = 14;
+ SOCKET_EAI_MAX = 15;
+ };
+
+ repeated bytes packed_address = 2;
+ optional string canonical_name = 3;
+ repeated string aliases = 4;
+}
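
To make the resolve call concrete, a hedged Go sketch of filling in a ResolveRequest with the generated types from the .pb.go above: name is a required proto2 field, and address_families reuses the nested CreateSocketRequest.SocketFamily enum. The enum constant name CreateSocketRequest_IPv4 follows the usual protoc-gen-go naming convention and is assumed here rather than shown in this hunk, as is the internal import path.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine/internal/socket"
)

func main() {
	req := &socket.ResolveRequest{
		// name = 1 is required, so it must be set before marshalling.
		Name: proto.String("example.com"),
		// CreateSocketRequest_IPv4 is the conventional generated constant for
		// the nested SocketFamily value IPv4 = 1 (assumed name, not shown in this hunk).
		AddressFamilies: []socket.CreateSocketRequest_SocketFamily{socket.CreateSocketRequest_IPv4},
	}

	buf, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d bytes: %s\n", len(buf), req.String())
}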
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.pb.go b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
new file mode 100644
index 000000000..56cc3f805
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
@@ -0,0 +1,198 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/system/system_service.proto
+// DO NOT EDIT!
+
+/*
+Package system is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/system/system_service.proto
+
+It has these top-level messages:
+ SystemServiceError
+ SystemStat
+ GetSystemStatsRequest
+ GetSystemStatsResponse
+ StartBackgroundRequestRequest
+ StartBackgroundRequestResponse
+*/
+package system
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SystemServiceError_ErrorCode int32
+
+const (
+ SystemServiceError_OK SystemServiceError_ErrorCode = 0
+ SystemServiceError_INTERNAL_ERROR SystemServiceError_ErrorCode = 1
+ SystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2
+ SystemServiceError_LIMIT_REACHED SystemServiceError_ErrorCode = 3
+)
+
+var SystemServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BACKEND_REQUIRED",
+ 3: "LIMIT_REACHED",
+}
+var SystemServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BACKEND_REQUIRED": 2,
+ "LIMIT_REACHED": 3,
+}
+
+func (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode {
+ p := new(SystemServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SystemServiceError_ErrorCode) String() string {
+ return proto.EnumName(SystemServiceError_ErrorCode_name, int32(x))
+}
+func (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, "SystemServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SystemServiceError_ErrorCode(value)
+ return nil
+}
+
+type SystemServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemServiceError) Reset() { *m = SystemServiceError{} }
+func (m *SystemServiceError) String() string { return proto.CompactTextString(m) }
+func (*SystemServiceError) ProtoMessage() {}
+
+type SystemStat struct {
+ // Instantaneous value of this stat.

+ Current *float64 `protobuf:"fixed64,1,opt,name=current" json:"current,omitempty"`
+ // Average over time, if this stat has an instantaneous value.
+ Average1M *float64 `protobuf:"fixed64,3,opt,name=average1m" json:"average1m,omitempty"`
+ Average10M *float64 `protobuf:"fixed64,4,opt,name=average10m" json:"average10m,omitempty"`
+ // Total value, if the stat accumulates over time.
+ Total *float64 `protobuf:"fixed64,2,opt,name=total" json:"total,omitempty"`
+ // Rate over time, if this stat accumulates.
+ Rate1M *float64 `protobuf:"fixed64,5,opt,name=rate1m" json:"rate1m,omitempty"`
+ Rate10M *float64 `protobuf:"fixed64,6,opt,name=rate10m" json:"rate10m,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemStat) Reset() { *m = SystemStat{} }
+func (m *SystemStat) String() string { return proto.CompactTextString(m) }
+func (*SystemStat) ProtoMessage() {}
+
+func (m *SystemStat) GetCurrent() float64 {
+ if m != nil && m.Current != nil {
+ return *m.Current
+ }
+ return 0
+}
+
+func (m *SystemStat) GetAverage1M() float64 {
+ if m != nil && m.Average1M != nil {
+ return *m.Average1M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetAverage10M() float64 {
+ if m != nil && m.Average10M != nil {
+ return *m.Average10M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetTotal() float64 {
+ if m != nil && m.Total != nil {
+ return *m.Total
+ }
+ return 0
+}
+
+func (m *SystemStat) GetRate1M() float64 {
+ if m != nil && m.Rate1M != nil {
+ return *m.Rate1M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetRate10M() float64 {
+ if m != nil && m.Rate10M != nil {
+ return *m.Rate10M
+ }
+ return 0
+}
+
+type GetSystemStatsRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSystemStatsRequest) Reset() { *m = GetSystemStatsRequest{} }
+func (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSystemStatsRequest) ProtoMessage() {}
+
+type GetSystemStatsResponse struct {
+ // CPU used by this instance, in mcycles.
+ Cpu *SystemStat `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"`
+ // Physical memory (RAM) used by this instance, in megabytes.
+ Memory *SystemStat `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSystemStatsResponse) Reset() { *m = GetSystemStatsResponse{} }
+func (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetSystemStatsResponse) ProtoMessage() {}
+
+func (m *GetSystemStatsResponse) GetCpu() *SystemStat {
+ if m != nil {
+ return m.Cpu
+ }
+ return nil
+}
+
+func (m *GetSystemStatsResponse) GetMemory() *SystemStat {
+ if m != nil {
+ return m.Memory
+ }
+ return nil
+}
+
+type StartBackgroundRequestRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestRequest) Reset() { *m = StartBackgroundRequestRequest{} }
+func (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestRequest) ProtoMessage() {}
+
+type StartBackgroundRequestResponse struct {
+ // Every /_ah/background request will have an X-AppEngine-BackgroundRequest
+ // header, whose value will be equal to this parameter, the request_id.
+ RequestId *string `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestResponse) Reset() { *m = StartBackgroundRequestResponse{} }
+func (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestResponse) ProtoMessage() {}
+
+func (m *StartBackgroundRequestResponse) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+func init() {
+}
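
The system service follows the same pattern. A short sketch under the same assumptions (internal vendored import path, golang/protobuf helpers) shows that chained getters stay nil-safe even when an optional sub-message such as memory was never populated; again this is an illustrative sketch, not part of the vendored sources.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine/internal/system" // internal package: only importable from within the appengine tree
)

func main() {
	// Only the CPU stat is populated; Memory is deliberately left nil.
	resp := &system.GetSystemStatsResponse{
		Cpu: &system.SystemStat{Current: proto.Float64(1200)},
	}

	fmt.Println(resp.GetCpu().GetCurrent()) // 1200
	// GetMemory returns a nil *SystemStat, and its getters still return zero values.
	fmt.Println(resp.GetMemory().GetTotal()) // 0
}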
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.proto b/vendor/google.golang.org/appengine/internal/system/system_service.proto
new file mode 100644
index 000000000..32c0bf859
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.proto
@@ -0,0 +1,49 @@
+syntax = "proto2";
+option go_package = "system";
+
+package appengine;
+
+message SystemServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BACKEND_REQUIRED = 2;
+ LIMIT_REACHED = 3;
+ }
+}
+
+message SystemStat {
+ // Instantaneous value of this stat.
+ optional double current = 1;
+
+ // Average over time, if this stat has an instantaneous value.
+ optional double average1m = 3;
+ optional double average10m = 4;
+
+ // Total value, if the stat accumulates over time.
+ optional double total = 2;
+
+ // Rate over time, if this stat accumulates.
+ optional double rate1m = 5;
+ optional double rate10m = 6;
+}
+
+message GetSystemStatsRequest {
+}
+
+message GetSystemStatsResponse {
+ // CPU used by this instance, in mcycles.
+ optional SystemStat cpu = 1;
+
+ // Physical memory (RAM) used by this instance, in megabytes.
+ optional SystemStat memory = 2;
+}
+
+message StartBackgroundRequestRequest {
+}
+
+message StartBackgroundRequestResponse {
+ // Every /_ah/background request will have an X-AppEngine-BackgroundRequest
+ // header, whose value will be equal to this parameter, the request_id.
+ optional string request_id = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
new file mode 100644
index 000000000..c3d428ec5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
@@ -0,0 +1,1888 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+// DO NOT EDIT!
+
+/*
+Package taskqueue is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+
+It has these top-level messages:
+ TaskQueueServiceError
+ TaskPayload
+ TaskQueueRetryParameters
+ TaskQueueAcl
+ TaskQueueHttpHeader
+ TaskQueueMode
+ TaskQueueAddRequest
+ TaskQueueAddResponse
+ TaskQueueBulkAddRequest
+ TaskQueueBulkAddResponse
+ TaskQueueDeleteRequest
+ TaskQueueDeleteResponse
+ TaskQueueForceRunRequest
+ TaskQueueForceRunResponse
+ TaskQueueUpdateQueueRequest
+ TaskQueueUpdateQueueResponse
+ TaskQueueFetchQueuesRequest
+ TaskQueueFetchQueuesResponse
+ TaskQueueFetchQueueStatsRequest
+ TaskQueueScannerQueueInfo
+ TaskQueueFetchQueueStatsResponse
+ TaskQueuePauseQueueRequest
+ TaskQueuePauseQueueResponse
+ TaskQueuePurgeQueueRequest
+ TaskQueuePurgeQueueResponse
+ TaskQueueDeleteQueueRequest
+ TaskQueueDeleteQueueResponse
+ TaskQueueDeleteGroupRequest
+ TaskQueueDeleteGroupResponse
+ TaskQueueQueryTasksRequest
+ TaskQueueQueryTasksResponse
+ TaskQueueFetchTaskRequest
+ TaskQueueFetchTaskResponse
+ TaskQueueUpdateStorageLimitRequest
+ TaskQueueUpdateStorageLimitResponse
+ TaskQueueQueryAndOwnTasksRequest
+ TaskQueueQueryAndOwnTasksResponse
+ TaskQueueModifyTaskLeaseRequest
+ TaskQueueModifyTaskLeaseResponse
+*/
+package taskqueue
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import appengine "google.golang.org/appengine/internal/datastore"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type TaskQueueServiceError_ErrorCode int32
+
+const (
+ TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0
+ TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1
+ TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2
+ TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3
+ TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4
+ TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5
+ TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6
+ TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7
+ TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8
+ TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9
+ TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10
+ TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11
+ TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12
+ TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13
+ TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14
+ TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15
+ TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16
+ TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17
+ TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18
+ TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19
+ TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20
+ TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21
+ TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22
+ TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23
+ TaskQueueServiceError_INCORRECT_CREATOR_NAME TaskQueueServiceError_ErrorCode = 24
+ TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25
+ TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26
+ TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000
+)
+
+var TaskQueueServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNKNOWN_QUEUE",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "TASK_TOO_LARGE",
+ 5: "INVALID_TASK_NAME",
+ 6: "INVALID_QUEUE_NAME",
+ 7: "INVALID_URL",
+ 8: "INVALID_QUEUE_RATE",
+ 9: "PERMISSION_DENIED",
+ 10: "TASK_ALREADY_EXISTS",
+ 11: "TOMBSTONED_TASK",
+ 12: "INVALID_ETA",
+ 13: "INVALID_REQUEST",
+ 14: "UNKNOWN_TASK",
+ 15: "TOMBSTONED_QUEUE",
+ 16: "DUPLICATE_TASK_NAME",
+ 17: "SKIPPED",
+ 18: "TOO_MANY_TASKS",
+ 19: "INVALID_PAYLOAD",
+ 20: "INVALID_RETRY_PARAMETERS",
+ 21: "INVALID_QUEUE_MODE",
+ 22: "ACL_LOOKUP_ERROR",
+ 23: "TRANSACTIONAL_REQUEST_TOO_LARGE",
+ 24: "INCORRECT_CREATOR_NAME",
+ 25: "TASK_LEASE_EXPIRED",
+ 26: "QUEUE_PAUSED",
+ 27: "INVALID_TAG",
+ 10000: "DATASTORE_ERROR",
+}
+var TaskQueueServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNKNOWN_QUEUE": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "TASK_TOO_LARGE": 4,
+ "INVALID_TASK_NAME": 5,
+ "INVALID_QUEUE_NAME": 6,
+ "INVALID_URL": 7,
+ "INVALID_QUEUE_RATE": 8,
+ "PERMISSION_DENIED": 9,
+ "TASK_ALREADY_EXISTS": 10,
+ "TOMBSTONED_TASK": 11,
+ "INVALID_ETA": 12,
+ "INVALID_REQUEST": 13,
+ "UNKNOWN_TASK": 14,
+ "TOMBSTONED_QUEUE": 15,
+ "DUPLICATE_TASK_NAME": 16,
+ "SKIPPED": 17,
+ "TOO_MANY_TASKS": 18,
+ "INVALID_PAYLOAD": 19,
+ "INVALID_RETRY_PARAMETERS": 20,
+ "INVALID_QUEUE_MODE": 21,
+ "ACL_LOOKUP_ERROR": 22,
+ "TRANSACTIONAL_REQUEST_TOO_LARGE": 23,
+ "INCORRECT_CREATOR_NAME": 24,
+ "TASK_LEASE_EXPIRED": 25,
+ "QUEUE_PAUSED": 26,
+ "INVALID_TAG": 27,
+ "DATASTORE_ERROR": 10000,
+}
+
+func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode {
+ p := new(TaskQueueServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x TaskQueueServiceError_ErrorCode) String() string {
+ return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x))
+}
+func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueServiceError_ErrorCode(value)
+ return nil
+}
+
+type TaskQueueMode_Mode int32
+
+const (
+ TaskQueueMode_PUSH TaskQueueMode_Mode = 0
+ TaskQueueMode_PULL TaskQueueMode_Mode = 1
+)
+
+var TaskQueueMode_Mode_name = map[int32]string{
+ 0: "PUSH",
+ 1: "PULL",
+}
+var TaskQueueMode_Mode_value = map[string]int32{
+ "PUSH": 0,
+ "PULL": 1,
+}
+
+func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode {
+ p := new(TaskQueueMode_Mode)
+ *p = x
+ return p
+}
+func (x TaskQueueMode_Mode) String() string {
+ return proto.EnumName(TaskQueueMode_Mode_name, int32(x))
+}
+func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueMode_Mode(value)
+ return nil
+}
+
+type TaskQueueAddRequest_RequestMethod int32
+
+const (
+ TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1
+ TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2
+ TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3
+ TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4
+ TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5
+)
+
+var TaskQueueAddRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueAddRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod {
+ p := new(TaskQueueAddRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueAddRequest_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueAddRequest_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RequestMethod int32
+
+const (
+ TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1
+ TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2
+ TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3
+ TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4
+ TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5
+)
+
+var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod {
+ p := new(TaskQueueQueryTasksResponse_Task_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} }
+func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueServiceError) ProtoMessage() {}
+
+type TaskPayload struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskPayload) Reset() { *m = TaskPayload{} }
+func (m *TaskPayload) String() string { return proto.CompactTextString(m) }
+func (*TaskPayload) ProtoMessage() {}
+
+func (m *TaskPayload) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+func (m *TaskPayload) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+func (m *TaskPayload) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.XXX_extensions)
+}
+func (m *TaskPayload) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
+}
+
+// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*TaskPayload)(nil)
+var _ proto.Unmarshaler = (*TaskPayload)(nil)
+
+var extRange_TaskPayload = []proto.ExtensionRange{
+ {10, 2147483646},
+}
+
+func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_TaskPayload
+}
+func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+type TaskQueueRetryParameters struct {
+ RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"`
+ AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"`
+ MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"`
+ MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"`
+ MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} }
+func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueRetryParameters) ProtoMessage() {}
+
+const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1
+const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600
+const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16
+
+func (m *TaskQueueRetryParameters) GetRetryLimit() int32 {
+ if m != nil && m.RetryLimit != nil {
+ return *m.RetryLimit
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 {
+ if m != nil && m.AgeLimitSec != nil {
+ return *m.AgeLimitSec
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 {
+ if m != nil && m.MinBackoffSec != nil {
+ return *m.MinBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MinBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 {
+ if m != nil && m.MaxBackoffSec != nil {
+ return *m.MaxBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MaxBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 {
+ if m != nil && m.MaxDoublings != nil {
+ return *m.MaxDoublings
+ }
+ return Default_TaskQueueRetryParameters_MaxDoublings
+}
+
+type TaskQueueAcl struct {
+ UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"`
+ WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} }
+func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAcl) ProtoMessage() {}
+
+func (m *TaskQueueAcl) GetUserEmail() [][]byte {
+ if m != nil {
+ return m.UserEmail
+ }
+ return nil
+}
+
+func (m *TaskQueueAcl) GetWriterEmail() [][]byte {
+ if m != nil {
+ return m.WriterEmail
+ }
+ return nil
+}
+
+type TaskQueueHttpHeader struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} }
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueHttpHeader) ProtoMessage() {}
+
+func (m *TaskQueueHttpHeader) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueHttpHeader) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueMode struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} }
+func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueMode) ProtoMessage() {}
+
+type TaskQueueAddRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep,name=Header" json:"header,omitempty"`
+ Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"`
+ Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"`
+ AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"`
+ Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} }
+func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest) ProtoMessage() {}
+
+const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST
+const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueAddRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return Default_TaskQueueAddRequest_Method
+}
+
+func (m *TaskQueueAddRequest) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueAddRequest_Mode
+}
+
+func (m *TaskQueueAddRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_Header struct {
+ Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} }
+func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_Header) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} }
+func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueAddResponse struct {
+ ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} }
+func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueAddResponse) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueBulkAddRequest struct {
+ AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} }
+func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddRequest) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest {
+ if m != nil {
+ return m.AddRequest
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse struct {
+ Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" json:"taskresult,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} }
+func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult {
+ if m != nil {
+ return m.Taskresult
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse_TaskResult struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} }
+func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueDeleteRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} }
+func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteResponse struct {
+ Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} }
+func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteResponse) ProtoMessage() {}
+
+func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+type TaskQueueForceRunRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} }
+func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunRequest) ProtoMessage() {}
+
+func (m *TaskQueueForceRunRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueForceRunResponse struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} }
+func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunResponse) ProtoMessage() {}
+
+func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+type TaskQueueUpdateQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} }
+func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueRequest) ProtoMessage() {}
+
+const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueUpdateQueueRequest_Mode
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+type TaskQueueUpdateQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} }
+func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueResponse) ProtoMessage() {}
+
+type TaskQueueFetchQueuesRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} }
+func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return 0
+}
+
+type TaskQueueFetchQueuesResponse struct {
+ Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} }
+func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue {
+ if m != nil {
+ return m.Queue
+ }
+ return nil
+}
+
+type TaskQueueFetchQueuesResponse_Queue struct {
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"`
+ CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} }
+func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false
+const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting"
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool {
+ if m != nil && m.Paused != nil {
+ return *m.Paused
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Paused
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Mode
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string {
+ if m != nil && m.CreatorName != nil {
+ return *m.CreatorName
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName
+}
+
+type TaskQueueFetchQueueStatsRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"`
+ MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} }
+func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0
+
+func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 {
+ if m != nil && m.MaxNumTasks != nil {
+ return *m.MaxNumTasks
+ }
+ return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks
+}
+
+type TaskQueueScannerQueueInfo struct {
+ ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"`
+ ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"`
+ SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"`
+ RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"`
+ EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} }
+func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueScannerQueueInfo) ProtoMessage() {}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 {
+ if m != nil && m.ExecutedLastMinute != nil {
+ return *m.ExecutedLastMinute
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 {
+ if m != nil && m.ExecutedLastHour != nil {
+ return *m.ExecutedLastHour
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 {
+ if m != nil && m.SamplingDurationSeconds != nil {
+ return *m.SamplingDurationSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 {
+ if m != nil && m.RequestsInFlight != nil {
+ return *m.RequestsInFlight
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 {
+ if m != nil && m.EnforcedRate != nil {
+ return *m.EnforcedRate
+ }
+ return 0
+}
+
+type TaskQueueFetchQueueStatsResponse struct {
+ Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} }
+func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats {
+ if m != nil {
+ return m.Queuestats
+ }
+ return nil
+}
+
+type TaskQueueFetchQueueStatsResponse_QueueStats struct {
+ NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"`
+ OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"`
+ ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() {
+ *m = TaskQueueFetchQueueStatsResponse_QueueStats{}
+}
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 {
+ if m != nil && m.NumTasks != nil {
+ return *m.NumTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 {
+ if m != nil && m.OldestEtaUsec != nil {
+ return *m.OldestEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo {
+ if m != nil {
+ return m.ScannerInfo
+ }
+ return nil
+}
+
+type TaskQueuePauseQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} }
+func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePauseQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetPause() bool {
+ if m != nil && m.Pause != nil {
+ return *m.Pause
+ }
+ return false
+}
+
+type TaskQueuePauseQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} }
+func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueResponse) ProtoMessage() {}
+
+type TaskQueuePurgeQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} }
+func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueuePurgeQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} }
+func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} }
+func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueueDeleteQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} }
+func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteGroupRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} }
+func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteGroupResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} }
+func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupResponse) ProtoMessage() {}
+
+type TaskQueueQueryTasksRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"`
+ StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"`
+ StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"`
+ MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} }
+func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1
+
+func (m *TaskQueueQueryTasksRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte {
+ if m != nil {
+ return m.StartTaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 {
+ if m != nil && m.StartEtaUsec != nil {
+ return *m.StartEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte {
+ if m != nil {
+ return m.StartTag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return Default_TaskQueueQueryTasksRequest_MaxRows
+}
+
+type TaskQueueQueryTasksResponse struct {
+ Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} }
+func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"`
+ RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep,name=Header" json:"header,omitempty"`
+ BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"`
+ Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"`
+ CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"`
+ Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"`
+ Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"`
+ Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"`
+ ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} }
+func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0
+const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return TaskQueueQueryTasksResponse_Task_GET
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 {
+ if m != nil && m.BodySize != nil {
+ return *m.BodySize
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 {
+ if m != nil && m.CreationTimeUsec != nil {
+ return *m.CreationTimeUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog {
+ if m != nil {
+ return m.Runlog
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 {
+ if m != nil && m.FirstTryUsec != nil {
+ return *m.FirstTryUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 {
+ if m != nil && m.ExecutionCount != nil {
+ return *m.ExecutionCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount
+}
+
+type TaskQueueQueryTasksResponse_Task_Header struct {
+ Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_Header{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_CronTimetable{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RunLog struct {
+ DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"`
+ LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"`
+ ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"`
+ ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"`
+ RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_RunLog{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 {
+ if m != nil && m.DispatchedUsec != nil {
+ return *m.DispatchedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 {
+ if m != nil && m.LagUsec != nil {
+ return *m.LagUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 {
+ if m != nil && m.ElapsedUsec != nil {
+ return *m.ElapsedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 {
+ if m != nil && m.ResponseCode != nil {
+ return *m.ResponseCode
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string {
+ if m != nil && m.RetryReason != nil {
+ return *m.RetryReason
+ }
+ return ""
+}
+
+type TaskQueueFetchTaskRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} }
+func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueFetchTaskResponse struct {
+ Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} }
+func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueUpdateStorageLimitRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} }
+func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type TaskQueueUpdateStorageLimitResponse struct {
+ NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} }
+func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 {
+ if m != nil && m.NewLimit != nil {
+ return *m.NewLimit
+ }
+ return 0
+}
+
+type TaskQueueQueryAndOwnTasksRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"`
+ GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"`
+ Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = TaskQueueQueryAndOwnTasksRequest{} }
+func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 {
+ if m != nil && m.MaxTasks != nil {
+ return *m.MaxTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool {
+ if m != nil && m.GroupByTag != nil {
+ return *m.GroupByTag
+ }
+ return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse struct {
+ Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} }
+func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"`
+ Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() {
+ *m = TaskQueueQueryAndOwnTasksResponse_Task{}
+}
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueModifyTaskLeaseRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} }
+func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+type TaskQueueModifyTaskLeaseResponse struct {
+ UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} }
+func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 {
+ if m != nil && m.UpdatedEtaUsec != nil {
+ return *m.UpdatedEtaUsec
+ }
+ return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
new file mode 100644
index 000000000..419aaf570
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
@@ -0,0 +1,342 @@
+syntax = "proto2";
+option go_package = "taskqueue";
+
+import "google.golang.org/appengine/internal/datastore/datastore_v3.proto";
+
+package appengine;
+
+message TaskQueueServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNKNOWN_QUEUE = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ TASK_TOO_LARGE = 4;
+ INVALID_TASK_NAME = 5;
+ INVALID_QUEUE_NAME = 6;
+ INVALID_URL = 7;
+ INVALID_QUEUE_RATE = 8;
+ PERMISSION_DENIED = 9;
+ TASK_ALREADY_EXISTS = 10;
+ TOMBSTONED_TASK = 11;
+ INVALID_ETA = 12;
+ INVALID_REQUEST = 13;
+ UNKNOWN_TASK = 14;
+ TOMBSTONED_QUEUE = 15;
+ DUPLICATE_TASK_NAME = 16;
+ SKIPPED = 17;
+ TOO_MANY_TASKS = 18;
+ INVALID_PAYLOAD = 19;
+ INVALID_RETRY_PARAMETERS = 20;
+ INVALID_QUEUE_MODE = 21;
+ ACL_LOOKUP_ERROR = 22;
+ TRANSACTIONAL_REQUEST_TOO_LARGE = 23;
+ INCORRECT_CREATOR_NAME = 24;
+ TASK_LEASE_EXPIRED = 25;
+ QUEUE_PAUSED = 26;
+ INVALID_TAG = 27;
+
+ // Reserved range for the Datastore error codes.
+ // The original Datastore error code is shifted by the DATASTORE_ERROR offset.
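+ // For example, a Datastore error whose own code is 5 would be reported
+ // here as 10005 (DATASTORE_ERROR + 5).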
+ DATASTORE_ERROR = 10000;
+ }
+}
+
+message TaskPayload {
+ extensions 10 to max;
+ option message_set_wire_format = true;
+}
+
+message TaskQueueRetryParameters {
+ optional int32 retry_limit = 1;
+ optional int64 age_limit_sec = 2;
+
+ optional double min_backoff_sec = 3 [default = 0.1];
+ optional double max_backoff_sec = 4 [default = 3600];
+ optional int32 max_doublings = 5 [default = 16];
+}
+
+message TaskQueueAcl {
+ repeated bytes user_email = 1;
+ repeated bytes writer_email = 2;
+}
+
+message TaskQueueHttpHeader {
+ required bytes key = 1;
+ required bytes value = 2;
+}
+
+message TaskQueueMode {
+ enum Mode {
+ PUSH = 0;
+ PULL = 1;
+ }
+}
+
+message TaskQueueAddRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5 [default=POST];
+
+ optional bytes url = 4;
+
+ repeated group Header = 6 {
+ required bytes key = 7;
+ required bytes value = 8;
+ }
+
+ optional bytes body = 9 [ctype=CORD];
+ optional Transaction transaction = 10;
+ optional bytes app_id = 11;
+
+ optional group CronTimetable = 12 {
+ required bytes schedule = 13;
+ required bytes timezone = 14;
+ }
+
+ optional bytes description = 15;
+ optional TaskPayload payload = 16;
+ optional TaskQueueRetryParameters retry_parameters = 17;
+ optional TaskQueueMode.Mode mode = 18 [default=PUSH];
+ optional bytes tag = 19;
+}
+
+message TaskQueueAddResponse {
+ optional bytes chosen_task_name = 1;
+}
+
+message TaskQueueBulkAddRequest {
+ repeated TaskQueueAddRequest add_request = 1;
+}
+
+message TaskQueueBulkAddResponse {
+ repeated group TaskResult = 1 {
+ required TaskQueueServiceError.ErrorCode result = 2;
+ optional bytes chosen_task_name = 3;
+ }
+}
+
+message TaskQueueDeleteRequest {
+ required bytes queue_name = 1;
+ repeated bytes task_name = 2;
+ optional bytes app_id = 3;
+}
+
+message TaskQueueDeleteResponse {
+ repeated TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueForceRunRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueForceRunResponse {
+ required TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueUpdateQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required int32 bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ optional TaskQueueRetryParameters retry_parameters = 6;
+ optional int32 max_concurrent_requests = 7;
+ optional TaskQueueMode.Mode mode = 8 [default = PUSH];
+ optional TaskQueueAcl acl = 9;
+ repeated TaskQueueHttpHeader header_override = 10;
+}
+
+message TaskQueueUpdateQueueResponse {
+}
+
+message TaskQueueFetchQueuesRequest {
+ optional bytes app_id = 1;
+ required int32 max_rows = 2;
+}
+
+message TaskQueueFetchQueuesResponse {
+ repeated group Queue = 1 {
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required double bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ required bool paused = 6 [default=false];
+ optional TaskQueueRetryParameters retry_parameters = 7;
+ optional int32 max_concurrent_requests = 8;
+ optional TaskQueueMode.Mode mode = 9 [default = PUSH];
+ optional TaskQueueAcl acl = 10;
+ repeated TaskQueueHttpHeader header_override = 11;
+ optional string creator_name = 12 [ctype=CORD, default="apphosting"];
+ }
+}
+
+message TaskQueueFetchQueueStatsRequest {
+ optional bytes app_id = 1;
+ repeated bytes queue_name = 2;
+ optional int32 max_num_tasks = 3 [default = 0];
+}
+
+message TaskQueueScannerQueueInfo {
+ required int64 executed_last_minute = 1;
+ required int64 executed_last_hour = 2;
+ required double sampling_duration_seconds = 3;
+ optional int32 requests_in_flight = 4;
+ optional double enforced_rate = 5;
+}
+
+message TaskQueueFetchQueueStatsResponse {
+ repeated group QueueStats = 1 {
+ required int32 num_tasks = 2;
+ required int64 oldest_eta_usec = 3;
+ optional TaskQueueScannerQueueInfo scanner_info = 4;
+ }
+}
+
+message TaskQueuePauseQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bool pause = 3;
+}
+
+message TaskQueuePauseQueueResponse {
+}
+
+message TaskQueuePurgeQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueuePurgeQueueResponse {
+}
+
+message TaskQueueDeleteQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueueDeleteQueueResponse {
+}
+
+message TaskQueueDeleteGroupRequest {
+ required bytes app_id = 1;
+}
+
+message TaskQueueDeleteGroupResponse {
+}
+
+message TaskQueueQueryTasksRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+
+ optional bytes start_task_name = 3;
+ optional int64 start_eta_usec = 4;
+ optional bytes start_tag = 6;
+ optional int32 max_rows = 5 [default = 1];
+}
+
+message TaskQueueQueryTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional bytes url = 4;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5;
+
+ optional int32 retry_count = 6 [default=0];
+
+ repeated group Header = 7 {
+ required bytes key = 8;
+ required bytes value = 9;
+ }
+
+ optional int32 body_size = 10;
+ optional bytes body = 11 [ctype=CORD];
+ required int64 creation_time_usec = 12;
+
+ optional group CronTimetable = 13 {
+ required bytes schedule = 14;
+ required bytes timezone = 15;
+ }
+
+ optional group RunLog = 16 {
+ required int64 dispatched_usec = 17;
+ required int64 lag_usec = 18;
+ required int64 elapsed_usec = 19;
+ optional int64 response_code = 20;
+ optional string retry_reason = 27;
+ }
+
+ optional bytes description = 21;
+ optional TaskPayload payload = 22;
+ optional TaskQueueRetryParameters retry_parameters = 23;
+ optional int64 first_try_usec = 24;
+ optional bytes tag = 25;
+ optional int32 execution_count = 26 [default=0];
+ }
+}
+
+message TaskQueueFetchTaskRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueFetchTaskResponse {
+ required TaskQueueQueryTasksResponse task = 1;
+}
+
+message TaskQueueUpdateStorageLimitRequest {
+ required bytes app_id = 1;
+ required int64 limit = 2;
+}
+
+message TaskQueueUpdateStorageLimitResponse {
+ required int64 new_limit = 1;
+}
+
+message TaskQueueQueryAndOwnTasksRequest {
+ required bytes queue_name = 1;
+ required double lease_seconds = 2;
+ required int64 max_tasks = 3;
+ optional bool group_by_tag = 4 [default=false];
+ optional bytes tag = 5;
+}
+
+message TaskQueueQueryAndOwnTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional int32 retry_count = 4 [default=0];
+ optional bytes body = 5 [ctype=CORD];
+ optional bytes tag = 6;
+ }
+}
+
+message TaskQueueModifyTaskLeaseRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ required double lease_seconds = 4;
+}
+
+message TaskQueueModifyTaskLeaseResponse {
+ required int64 updated_eta_usec = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 000000000..28a6d1812
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,107 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
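+//
+// For example (illustrative only; the concrete request types are registered by
+// the packages that build them), a setter for a datastore Query could look like:
+//
+//	RegisterTransactionSetter(func(req *pb.Query, t *pb.Transaction) {
+//		req.Transaction = t
+//	})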
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
+
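+// transactionKey is used, by address, as the context key under which the
+// current *transaction is stored; taking the address of a package-level
+// variable yields a key that cannot collide with keys from other packages.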
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
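+// RunTransactionOnce makes a single attempt at a datastore transaction: it
+// begins a transaction, runs f with the transaction attached to the context,
+// and commits, rolling back instead if f returns a non-nil error or panics.
+// A commit that fails because of contention is reported as
+// ErrConcurrentTransaction, leaving any retry policy to the caller.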
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+ if transactionFromContext(c) != nil {
+ return errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return ErrConcurrentTransaction
+ }
+ }
+ return err
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 000000000..af463fbb2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+ URLFetchServiceError
+ URLFetchRequest
+ URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+
+type URLFetchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 000000000..f695edf6a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
new file mode 100644
index 000000000..6b52ffcce
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
@@ -0,0 +1,289 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/user/user_service.proto
+// DO NOT EDIT!
+
+/*
+Package user is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/user/user_service.proto
+
+It has these top-level messages:
+ UserServiceError
+ CreateLoginURLRequest
+ CreateLoginURLResponse
+ CreateLogoutURLRequest
+ CreateLogoutURLResponse
+ GetOAuthUserRequest
+ GetOAuthUserResponse
+ CheckOAuthSignatureRequest
+ CheckOAuthSignatureResponse
+*/
+package user
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type UserServiceError_ErrorCode int32
+
+const (
+ UserServiceError_OK UserServiceError_ErrorCode = 0
+ UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1
+ UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2
+ UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3
+ UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4
+ UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5
+)
+
+var UserServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "REDIRECT_URL_TOO_LONG",
+ 2: "NOT_ALLOWED",
+ 3: "OAUTH_INVALID_TOKEN",
+ 4: "OAUTH_INVALID_REQUEST",
+ 5: "OAUTH_ERROR",
+}
+var UserServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "REDIRECT_URL_TOO_LONG": 1,
+ "NOT_ALLOWED": 2,
+ "OAUTH_INVALID_TOKEN": 3,
+ "OAUTH_INVALID_REQUEST": 4,
+ "OAUTH_ERROR": 5,
+}
+
+func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {
+ p := new(UserServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x UserServiceError_ErrorCode) String() string {
+ return proto.EnumName(UserServiceError_ErrorCode_name, int32(x))
+}
+func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = UserServiceError_ErrorCode(value)
+ return nil
+}
+
+type UserServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserServiceError) Reset() { *m = UserServiceError{} }
+func (m *UserServiceError) String() string { return proto.CompactTextString(m) }
+func (*UserServiceError) ProtoMessage() {}
+
+type CreateLoginURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} }
+func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLRequest) ProtoMessage() {}
+
+func (m *CreateLoginURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+type CreateLoginURLResponse struct {
+ LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} }
+func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLResponse) ProtoMessage() {}
+
+func (m *CreateLoginURLResponse) GetLoginUrl() string {
+ if m != nil && m.LoginUrl != nil {
+ return *m.LoginUrl
+ }
+ return ""
+}
+
+type CreateLogoutURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} }
+func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLRequest) ProtoMessage() {}
+
+func (m *CreateLogoutURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLogoutURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+type CreateLogoutURLResponse struct {
+ LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} }
+func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLResponse) ProtoMessage() {}
+
+func (m *CreateLogoutURLResponse) GetLogoutUrl() string {
+ if m != nil && m.LogoutUrl != nil {
+ return *m.LogoutUrl
+ }
+ return ""
+}
+
+type GetOAuthUserRequest struct {
+ Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} }
+func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserRequest) ProtoMessage() {}
+
+func (m *GetOAuthUserRequest) GetScope() string {
+ if m != nil && m.Scope != nil {
+ return *m.Scope
+ }
+ return ""
+}
+
+func (m *GetOAuthUserRequest) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type GetOAuthUserResponse struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"`
+ AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"`
+ UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"`
+ IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"`
+ ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"`
+ Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} }
+func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserResponse) ProtoMessage() {}
+
+const Default_GetOAuthUserResponse_IsAdmin bool = false
+
+func (m *GetOAuthUserResponse) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserId() string {
+ if m != nil && m.UserId != nil {
+ return *m.UserId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserOrganization() string {
+ if m != nil && m.UserOrganization != nil {
+ return *m.UserOrganization
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetIsAdmin() bool {
+ if m != nil && m.IsAdmin != nil {
+ return *m.IsAdmin
+ }
+ return Default_GetOAuthUserResponse_IsAdmin
+}
+
+func (m *GetOAuthUserResponse) GetClientId() string {
+ if m != nil && m.ClientId != nil {
+ return *m.ClientId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type CheckOAuthSignatureRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} }
+func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureRequest) ProtoMessage() {}
+
+type CheckOAuthSignatureResponse struct {
+ OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} }
+func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureResponse) ProtoMessage() {}
+
+func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {
+ if m != nil && m.OauthConsumerKey != nil {
+ return *m.OauthConsumerKey
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto
new file mode 100644
index 000000000..f3e969346
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto
@@ -0,0 +1,58 @@
+syntax = "proto2";
+option go_package = "user";
+
+package appengine;
+
+message UserServiceError {
+ enum ErrorCode {
+ OK = 0;
+ REDIRECT_URL_TOO_LONG = 1;
+ NOT_ALLOWED = 2;
+ OAUTH_INVALID_TOKEN = 3;
+ OAUTH_INVALID_REQUEST = 4;
+ OAUTH_ERROR = 5;
+ }
+}
+
+message CreateLoginURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+ optional string federated_identity = 3 [default = ""];
+}
+
+message CreateLoginURLResponse {
+ required string login_url = 1;
+}
+
+message CreateLogoutURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+}
+
+message CreateLogoutURLResponse {
+ required string logout_url = 1;
+}
+
+message GetOAuthUserRequest {
+ optional string scope = 1;
+
+ repeated string scopes = 2;
+}
+
+message GetOAuthUserResponse {
+ required string email = 1;
+ required string user_id = 2;
+ required string auth_domain = 3;
+ optional string user_organization = 4 [default = ""];
+ optional bool is_admin = 5 [default = false];
+ optional string client_id = 6 [default = ""];
+
+ repeated string scopes = 7;
+}
+
+message CheckOAuthSignatureRequest {
+}
+
+message CheckOAuthSignatureResponse {
+ required string oauth_consumer_key = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
new file mode 100644
index 000000000..6d5b0ae65
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
@@ -0,0 +1,427 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+// DO NOT EDIT!
+
+/*
+Package xmpp is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+
+It has these top-level messages:
+ XmppServiceError
+ PresenceRequest
+ PresenceResponse
+ BulkPresenceRequest
+ BulkPresenceResponse
+ XmppMessageRequest
+ XmppMessageResponse
+ XmppSendPresenceRequest
+ XmppSendPresenceResponse
+ XmppInviteRequest
+ XmppInviteResponse
+*/
+package xmpp
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type XmppServiceError_ErrorCode int32
+
+const (
+ XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1
+ XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2
+ XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3
+ XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4
+ XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5
+ XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6
+ XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7
+ XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8
+ XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9
+)
+
+var XmppServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "INVALID_JID",
+ 3: "NO_BODY",
+ 4: "INVALID_XML",
+ 5: "INVALID_TYPE",
+ 6: "INVALID_SHOW",
+ 7: "EXCEEDED_MAX_SIZE",
+ 8: "APPID_ALIAS_REQUIRED",
+ 9: "NONDEFAULT_MODULE",
+}
+var XmppServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "INVALID_JID": 2,
+ "NO_BODY": 3,
+ "INVALID_XML": 4,
+ "INVALID_TYPE": 5,
+ "INVALID_SHOW": 6,
+ "EXCEEDED_MAX_SIZE": 7,
+ "APPID_ALIAS_REQUIRED": 8,
+ "NONDEFAULT_MODULE": 9,
+}
+
+func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode {
+ p := new(XmppServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x XmppServiceError_ErrorCode) String() string {
+ return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x))
+}
+func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = XmppServiceError_ErrorCode(value)
+ return nil
+}
+
+type PresenceResponse_SHOW int32
+
+const (
+ PresenceResponse_NORMAL PresenceResponse_SHOW = 0
+ PresenceResponse_AWAY PresenceResponse_SHOW = 1
+ PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2
+ PresenceResponse_CHAT PresenceResponse_SHOW = 3
+ PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4
+)
+
+var PresenceResponse_SHOW_name = map[int32]string{
+ 0: "NORMAL",
+ 1: "AWAY",
+ 2: "DO_NOT_DISTURB",
+ 3: "CHAT",
+ 4: "EXTENDED_AWAY",
+}
+var PresenceResponse_SHOW_value = map[string]int32{
+ "NORMAL": 0,
+ "AWAY": 1,
+ "DO_NOT_DISTURB": 2,
+ "CHAT": 3,
+ "EXTENDED_AWAY": 4,
+}
+
+func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW {
+ p := new(PresenceResponse_SHOW)
+ *p = x
+ return p
+}
+func (x PresenceResponse_SHOW) String() string {
+ return proto.EnumName(PresenceResponse_SHOW_name, int32(x))
+}
+func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW")
+ if err != nil {
+ return err
+ }
+ *x = PresenceResponse_SHOW(value)
+ return nil
+}
+
+type XmppMessageResponse_XmppMessageStatus int32
+
+const (
+ XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0
+ XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1
+ XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2
+)
+
+var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{
+ 0: "NO_ERROR",
+ 1: "INVALID_JID",
+ 2: "OTHER_ERROR",
+}
+var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{
+ "NO_ERROR": 0,
+ "INVALID_JID": 1,
+ "OTHER_ERROR": 2,
+}
+
+func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus {
+ p := new(XmppMessageResponse_XmppMessageStatus)
+ *p = x
+ return p
+}
+func (x XmppMessageResponse_XmppMessageStatus) String() string {
+ return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x))
+}
+func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus")
+ if err != nil {
+ return err
+ }
+ *x = XmppMessageResponse_XmppMessageStatus(value)
+ return nil
+}
+
+type XmppServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppServiceError) Reset() { *m = XmppServiceError{} }
+func (m *XmppServiceError) String() string { return proto.CompactTextString(m) }
+func (*XmppServiceError) ProtoMessage() {}
+
+type PresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceRequest) Reset() { *m = PresenceRequest{} }
+func (m *PresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*PresenceRequest) ProtoMessage() {}
+
+func (m *PresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *PresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type PresenceResponse struct {
+ IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"`
+ Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"`
+ Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceResponse) Reset() { *m = PresenceResponse{} }
+func (m *PresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*PresenceResponse) ProtoMessage() {}
+
+func (m *PresenceResponse) GetIsAvailable() bool {
+ if m != nil && m.IsAvailable != nil {
+ return *m.IsAvailable
+ }
+ return false
+}
+
+func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW {
+ if m != nil && m.Presence != nil {
+ return *m.Presence
+ }
+ return PresenceResponse_NORMAL
+}
+
+func (m *PresenceResponse) GetValid() bool {
+ if m != nil && m.Valid != nil {
+ return *m.Valid
+ }
+ return false
+}
+
+type BulkPresenceRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceRequest) Reset() { *m = BulkPresenceRequest{} }
+func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceRequest) ProtoMessage() {}
+
+func (m *BulkPresenceRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *BulkPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type BulkPresenceResponse struct {
+ PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} }
+func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceResponse) ProtoMessage() {}
+
+func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse {
+ if m != nil {
+ return m.PresenceResponse
+ }
+ return nil
+}
+
+type XmppMessageRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"`
+ RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"`
+ Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} }
+func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageRequest) ProtoMessage() {}
+
+const Default_XmppMessageRequest_RawXml bool = false
+const Default_XmppMessageRequest_Type string = "chat"
+
+func (m *XmppMessageRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *XmppMessageRequest) GetBody() string {
+ if m != nil && m.Body != nil {
+ return *m.Body
+ }
+ return ""
+}
+
+func (m *XmppMessageRequest) GetRawXml() bool {
+ if m != nil && m.RawXml != nil {
+ return *m.RawXml
+ }
+ return Default_XmppMessageRequest_RawXml
+}
+
+func (m *XmppMessageRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_XmppMessageRequest_Type
+}
+
+func (m *XmppMessageRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppMessageResponse struct {
+ Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} }
+func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageResponse) ProtoMessage() {}
+
+func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type XmppSendPresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
+ Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"`
+ Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} }
+func (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceRequest) ProtoMessage() {}
+
+func (m *XmppSendPresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetShow() string {
+ if m != nil && m.Show != nil {
+ return *m.Show
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppSendPresenceResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} }
+func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceResponse) ProtoMessage() {}
+
+type XmppInviteRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} }
+func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteRequest) ProtoMessage() {}
+
+func (m *XmppInviteRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppInviteRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppInviteResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} }
+func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteResponse) ProtoMessage() {}
+
+func init() {
+}
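The Default_XmppMessageRequest_* constants above come from the [default = ...] options in the .proto definition that follows; a getter on an unset optional field returns that constant rather than Go's zero value. A short sketch, with an illustrative JID and type names left unqualified for brevity:

    // type and raw_xml are left unset, so the getters report the proto2
    // defaults ("chat" and false) via the Default_* constants.
    msg := &XmppMessageRequest{
        Jid:  []string{"someone@example.com"},
        Body: proto.String("hello"),
    }
    _ = msg.GetType()   // "chat"  (Default_XmppMessageRequest_Type)
    _ = msg.GetRawXml() // false   (Default_XmppMessageRequest_RawXml)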
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
new file mode 100644
index 000000000..472d52ebf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
@@ -0,0 +1,83 @@
+syntax = "proto2";
+option go_package = "xmpp";
+
+package appengine;
+
+message XmppServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ INVALID_JID = 2;
+ NO_BODY = 3;
+ INVALID_XML = 4;
+ INVALID_TYPE = 5;
+ INVALID_SHOW = 6;
+ EXCEEDED_MAX_SIZE = 7;
+ APPID_ALIAS_REQUIRED = 8;
+ NONDEFAULT_MODULE = 9;
+ }
+}
+
+message PresenceRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message PresenceResponse {
+ enum SHOW {
+ NORMAL = 0;
+ AWAY = 1;
+ DO_NOT_DISTURB = 2;
+ CHAT = 3;
+ EXTENDED_AWAY = 4;
+ }
+
+ required bool is_available = 1;
+ optional SHOW presence = 2;
+ optional bool valid = 3;
+}
+
+message BulkPresenceRequest {
+ repeated string jid = 1;
+ optional string from_jid = 2;
+}
+
+message BulkPresenceResponse {
+ repeated PresenceResponse presence_response = 1;
+}
+
+message XmppMessageRequest {
+ repeated string jid = 1;
+ required string body = 2;
+ optional bool raw_xml = 3 [ default = false ];
+ optional string type = 4 [ default = "chat" ];
+ optional string from_jid = 5;
+}
+
+message XmppMessageResponse {
+ enum XmppMessageStatus {
+ NO_ERROR = 0;
+ INVALID_JID = 1;
+ OTHER_ERROR = 2;
+ }
+
+ repeated XmppMessageStatus status = 1;
+}
+
+message XmppSendPresenceRequest {
+ required string jid = 1;
+ optional string type = 2;
+ optional string show = 3;
+ optional string status = 4;
+ optional string from_jid = 5;
+}
+
+message XmppSendPresenceResponse {
+}
+
+message XmppInviteRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message XmppInviteResponse {
+}
diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go
new file mode 100644
index 000000000..24d58601b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/api.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+// This file implements the logging API.
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Debugf formats its arguments according to the format, analogous to fmt.Printf,
+// and records the text as a log message at Debug level. The message will be associated
+// with the request linked with the provided context.
+func Debugf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 0, format, args...)
+}
+
+// Infof is like Debugf, but at Info level.
+func Infof(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 1, format, args...)
+}
+
+// Warningf is like Debugf, but at Warning level.
+func Warningf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 2, format, args...)
+}
+
+// Errorf is like Debugf, but at Error level.
+func Errorf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 3, format, args...)
+}
+
+// Criticalf is like Debugf, but at Critical level.
+func Criticalf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 4, format, args...)
+}
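The five helpers above differ only in the severity value they pass to internal.Logf (0 for Debug through 4 for Critical); all of them log against the request associated with the supplied context. A minimal handler sketch, assuming the usual appengine and net/http imports (doWork is a placeholder, not part of the vendored code):

    func handle(w http.ResponseWriter, r *http.Request) {
        ctx := appengine.NewContext(r)
        log.Debugf(ctx, "handling %s", r.URL.Path) // severity 0
        if err := doWork(ctx); err != nil {        // doWork is illustrative
            log.Errorf(ctx, "doWork failed: %v", err) // severity 3
            http.Error(w, "internal error", http.StatusInternalServerError)
            return
        }
        log.Infof(ctx, "done") // severity 1
    }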
diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go
new file mode 100644
index 000000000..731ad8c36
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log.go
@@ -0,0 +1,323 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package log provides the means of writing and querying an application's logs
+from within an App Engine application.
+
+Example:
+ c := appengine.NewContext(r)
+ query := &log.Query{
+ AppLogs: true,
+ Versions: []string{"1"},
+ }
+
+ for results := query.Run(c); ; {
+ record, err := results.Next()
+ if err == log.Done {
+ log.Infof(c, "Done processing results")
+ break
+ }
+ if err != nil {
+ log.Errorf(c, "Failed to retrieve next log: %v", err)
+ break
+ }
+ log.Infof(c, "Saw record %v", record)
+ }
+*/
+package log // import "google.golang.org/appengine/log"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/log"
+)
+
+// Query defines a logs query.
+type Query struct {
+ // Start time specifies the earliest log to return (inclusive).
+ StartTime time.Time
+
+ // End time specifies the latest log to return (exclusive).
+ EndTime time.Time
+
+ // Offset specifies a position within the log stream to resume reading from,
+ // and should come from a previously returned Record's field of the same name.
+ Offset []byte
+
+ // Incomplete controls whether active (incomplete) requests should be included.
+ Incomplete bool
+
+ // AppLogs indicates if application-level logs should be included.
+ AppLogs bool
+
+ // ApplyMinLevel indicates if MinLevel should be used to filter results.
+ ApplyMinLevel bool
+
+ // If ApplyMinLevel is true, only logs for requests with at least one
+ // application log of MinLevel or higher will be returned.
+ MinLevel int
+
+ // Versions is the list of major version IDs whose logs should be retrieved.
+ // Logs for specific modules can be retrieved by specifying versions
+ // in the form "module:version"; the default module is used if no module
+ // is specified.
+ Versions []string
+
+ // A list of requests to search for instead of a time-based scan. Cannot be
+ // combined with filtering options such as StartTime, EndTime, Offset,
+ // Incomplete, ApplyMinLevel, or Versions.
+ RequestIDs []string
+}
+
+// AppLog represents a single application-level log.
+type AppLog struct {
+ Time time.Time
+ Level int
+ Message string
+}
+
+// Record contains all the information for a single web request.
+type Record struct {
+ AppID string
+ ModuleID string
+ VersionID string
+ RequestID []byte
+ IP string
+ Nickname string
+ AppEngineRelease string
+
+ // The time when this request started.
+ StartTime time.Time
+
+ // The time when this request finished.
+ EndTime time.Time
+
+ // Opaque cursor into the result stream.
+ Offset []byte
+
+ // The time required to process the request.
+ Latency time.Duration
+ MCycles int64
+ Method string
+ Resource string
+ HTTPVersion string
+ Status int32
+
+ // The size of the response sent back to the client, in bytes.
+ ResponseSize int64
+ Referrer string
+ UserAgent string
+ URLMapEntry string
+ Combined string
+ Host string
+
+ // The estimated cost of this request, in dollars.
+ Cost float64
+ TaskQueueName string
+ TaskName string
+ WasLoadingRequest bool
+ PendingTime time.Duration
+ Finished bool
+ AppLogs []AppLog
+
+ // Mostly-unique identifier for the instance that handled the request, if available.
+ InstanceID string
+}
+
+// Result represents the result of a query.
+type Result struct {
+ logs []*Record
+ context context.Context
+ request *pb.LogReadRequest
+ resultsSeen bool
+ err error
+}
+
+// Next returns the next log record, or Done if there are no more results.
+func (qr *Result) Next() (*Record, error) {
+ if qr.err != nil {
+ return nil, qr.err
+ }
+ if len(qr.logs) > 0 {
+ lr := qr.logs[0]
+ qr.logs = qr.logs[1:]
+ return lr, nil
+ }
+
+ if qr.request.Offset == nil && qr.resultsSeen {
+ return nil, Done
+ }
+
+ if err := qr.run(); err != nil {
+ // Errors here may be retried, so don't store the error.
+ return nil, err
+ }
+
+ return qr.Next()
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("log: query has no more results")
+
+// protoToAppLogs takes as input an array of pointers to LogLines, the internal
+// Protocol Buffer representation of a single application-level log,
+// and converts it to an array of AppLogs, the external representation
+// of an application-level log.
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog {
+ appLogs := make([]AppLog, len(logLines))
+
+ for i, line := range logLines {
+ appLogs[i] = AppLog{
+ Time: time.Unix(0, *line.Time*1e3),
+ Level: int(*line.Level),
+ Message: *line.LogMessage,
+ }
+ }
+
+ return appLogs
+}
+
+// protoToRecord converts a RequestLog, the internal Protocol Buffer
+// representation of a single request-level log, to a Record, its
+// corresponding external representation.
+func protoToRecord(rl *pb.RequestLog) *Record {
+ offset, err := proto.Marshal(rl.Offset)
+ if err != nil {
+ offset = nil
+ }
+ return &Record{
+ AppID: *rl.AppId,
+ ModuleID: rl.GetModuleId(),
+ VersionID: *rl.VersionId,
+ RequestID: rl.RequestId,
+ Offset: offset,
+ IP: *rl.Ip,
+ Nickname: rl.GetNickname(),
+ AppEngineRelease: string(rl.GetAppEngineRelease()),
+ StartTime: time.Unix(0, *rl.StartTime*1e3),
+ EndTime: time.Unix(0, *rl.EndTime*1e3),
+ Latency: time.Duration(*rl.Latency) * time.Microsecond,
+ MCycles: *rl.Mcycles,
+ Method: *rl.Method,
+ Resource: *rl.Resource,
+ HTTPVersion: *rl.HttpVersion,
+ Status: *rl.Status,
+ ResponseSize: *rl.ResponseSize,
+ Referrer: rl.GetReferrer(),
+ UserAgent: rl.GetUserAgent(),
+ URLMapEntry: *rl.UrlMapEntry,
+ Combined: *rl.Combined,
+ Host: rl.GetHost(),
+ Cost: rl.GetCost(),
+ TaskQueueName: rl.GetTaskQueueName(),
+ TaskName: rl.GetTaskName(),
+ WasLoadingRequest: rl.GetWasLoadingRequest(),
+ PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond,
+ Finished: rl.GetFinished(),
+ AppLogs: protoToAppLogs(rl.Line),
+ InstanceID: string(rl.GetCloneKey()),
+ }
+}
+
+// Run starts a query for log records, which contain request and application
+// level log information.
+func (params *Query) Run(c context.Context) *Result {
+ req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c))
+ return &Result{
+ context: c,
+ request: req,
+ err: err,
+ }
+}
+
+func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
+ req := &pb.LogReadRequest{}
+ req.AppId = &appID
+ if !params.StartTime.IsZero() {
+ req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
+ }
+ if !params.EndTime.IsZero() {
+ req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
+ }
+ if len(params.Offset) > 0 {
+ var offset pb.LogOffset
+ if err := proto.Unmarshal(params.Offset, &offset); err != nil {
+ return nil, fmt.Errorf("bad Offset: %v", err)
+ }
+ req.Offset = &offset
+ }
+ if params.Incomplete {
+ req.IncludeIncomplete = &params.Incomplete
+ }
+ if params.AppLogs {
+ req.IncludeAppLogs = &params.AppLogs
+ }
+ if params.ApplyMinLevel {
+ req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
+ }
+ if params.Versions == nil {
+ // If no versions were specified, default to the default module at
+ // the major version being used by this module.
+ if i := strings.Index(versionID, "."); i >= 0 {
+ versionID = versionID[:i]
+ }
+ req.VersionId = []string{versionID}
+ } else {
+ req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
+ for _, v := range params.Versions {
+ var m *string
+ if i := strings.Index(v, ":"); i >= 0 {
+ m, v = proto.String(v[:i]), v[i+1:]
+ }
+ req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
+ ModuleId: m,
+ VersionId: proto.String(v),
+ })
+ }
+ }
+ if params.RequestIDs != nil {
+ ids := make([][]byte, len(params.RequestIDs))
+ for i, v := range params.RequestIDs {
+ ids[i] = []byte(v)
+ }
+ req.RequestId = ids
+ }
+
+ return req, nil
+}
+
+// run takes the query Result produced by a call to Run and updates it with
+// more Records. The updated Result contains a new set of logs as well as an
+// offset to where more logs can be found. We also convert the items in the
+// response from their internal representations to external versions of the
+// same structs.
+func (r *Result) run() error {
+ res := &pb.LogReadResponse{}
+ if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil {
+ return err
+ }
+
+ r.logs = make([]*Record, len(res.Log))
+ r.request.Offset = res.Offset
+ r.resultsSeen = true
+
+ for i, log := range res.Log {
+ r.logs[i] = protoToRecord(log)
+ }
+
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name)
+}
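Beyond the basic loop in the package comment, the Query fields above support filtering: Versions entries of the form "module:version" are split by makeRequest into LogModuleVersion pairs, and ApplyMinLevel/MinLevel keep only requests that produced at least one app log at that level or higher. A hedged sketch (the module and version names are made up; ctx is assumed to be an App Engine request context):

    q := &log.Query{
        AppLogs:       true,
        ApplyMinLevel: true,
        MinLevel:      3, // 3 is the level used by log.Errorf
        Versions:      []string{"1", "backend:beta"},
    }
    res := q.Run(ctx)
    for {
        rec, err := res.Next()
        if err == log.Done {
            break
        }
        if err != nil {
            log.Errorf(ctx, "log query: %v", err)
            break
        }
        log.Infof(ctx, "%s %s -> %d", rec.Method, rec.Resource, rec.Status)
    }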
diff --git a/vendor/google.golang.org/appengine/log/log_test.go b/vendor/google.golang.org/appengine/log/log_test.go
new file mode 100644
index 000000000..726468e23
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log_test.go
@@ -0,0 +1,112 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "google.golang.org/appengine/internal/log"
+)
+
+func TestQueryToRequest(t *testing.T) {
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.LogReadRequest
+ }{
+ {
+ desc: "Empty",
+ query: &Query{},
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ VersionId: []string{"v12"},
+ },
+ },
+ {
+ desc: "Versions",
+ query: &Query{
+ Versions: []string{"alpha", "backend:beta"},
+ },
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ ModuleVersion: []*pb.LogModuleVersion{
+ {
+ VersionId: proto.String("alpha"),
+ }, {
+ ModuleId: proto.String("backend"),
+ VersionId: proto.String("beta"),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ req, err := makeRequest(tt.query, "s~fake", "v12")
+
+ if err != nil {
+ t.Errorf("%s: got err %v, want nil", tt.desc, err)
+ continue
+ }
+ if !proto.Equal(req, tt.want) {
+ t.Errorf("%s request:\ngot %v\nwant %v", tt.desc, req, tt.want)
+ }
+ }
+}
+
+func TestProtoToRecord(t *testing.T) {
+ // We deliberately leave ModuleId and other optional fields unset.
+ p := &pb.RequestLog{
+ AppId: proto.String("s~fake"),
+ VersionId: proto.String("1"),
+ RequestId: []byte("deadbeef"),
+ Ip: proto.String("127.0.0.1"),
+ StartTime: proto.Int64(431044244000000),
+ EndTime: proto.Int64(431044724000000),
+ Latency: proto.Int64(480000000),
+ Mcycles: proto.Int64(7),
+ Method: proto.String("GET"),
+ Resource: proto.String("/app"),
+ HttpVersion: proto.String("1.1"),
+ Status: proto.Int32(418),
+ ResponseSize: proto.Int64(1337),
+ UrlMapEntry: proto.String("_go_app"),
+ Combined: proto.String("apache log"),
+ }
+ // Sanity check that all required fields are set.
+ if _, err := proto.Marshal(p); err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ want := &Record{
+ AppID: "s~fake",
+ ModuleID: "default",
+ VersionID: "1",
+ RequestID: []byte("deadbeef"),
+ IP: "127.0.0.1",
+ StartTime: time.Date(1983, 8, 29, 22, 30, 44, 0, time.UTC),
+ EndTime: time.Date(1983, 8, 29, 22, 38, 44, 0, time.UTC),
+ Latency: 8 * time.Minute,
+ MCycles: 7,
+ Method: "GET",
+ Resource: "/app",
+ HTTPVersion: "1.1",
+ Status: 418,
+ ResponseSize: 1337,
+ URLMapEntry: "_go_app",
+ Combined: "apache log",
+ Finished: true,
+ AppLogs: []AppLog{},
+ }
+ got := protoToRecord(p)
+ // Coerce locations to UTC since otherwise they will be in local time.
+ got.StartTime, got.EndTime = got.StartTime.UTC(), got.EndTime.UTC()
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("protoToRecord:\ngot: %v\nwant: %v", got, want)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/mail/mail.go b/vendor/google.golang.org/appengine/mail/mail.go
new file mode 100644
index 000000000..1ce1e8706
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail.go
@@ -0,0 +1,123 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package mail provides the means of sending email from an
+App Engine application.
+
+Example:
+ msg := &mail.Message{
+ Sender: "romeo@montague.com",
+ To: []string{"Juliet <juliet@capulet.org>"},
+ Subject: "See you tonight",
+ Body: "Don't forget our plans. Hark, 'til later.",
+ }
+ if err := mail.Send(c, msg); err != nil {
+ log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err)
+ }
+*/
+package mail // import "google.golang.org/appengine/mail"
+
+import (
+ "net/mail"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ bpb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+// A Message represents an email message.
+// Addresses may be of any form permitted by RFC 822.
+type Message struct {
+ // Sender must be set, and must be either an application admin
+ // or the currently signed-in user.
+ Sender string
+ ReplyTo string // may be empty
+
+ // At least one of these slices must have a non-zero length,
+ // except when calling SendToAdmins.
+ To, Cc, Bcc []string
+
+ Subject string
+
+ // At least one of Body or HTMLBody must be non-empty.
+ Body string
+ HTMLBody string
+
+ Attachments []Attachment
+
+ // Extra mail headers.
+ // See https://cloud.google.com/appengine/docs/standard/go/mail/
+ // for permissible headers.
+ Headers mail.Header
+}
+
+// An Attachment represents an email attachment.
+type Attachment struct {
+ // Name must be set to a valid file name.
+ Name string
+ Data []byte
+ ContentID string
+}
+
+// Send sends an email message.
+func Send(c context.Context, msg *Message) error {
+ return send(c, "Send", msg)
+}
+
+// SendToAdmins sends an email message to the application's administrators.
+func SendToAdmins(c context.Context, msg *Message) error {
+ return send(c, "SendToAdmins", msg)
+}
+
+func send(c context.Context, method string, msg *Message) error {
+ req := &pb.MailMessage{
+ Sender: &msg.Sender,
+ To: msg.To,
+ Cc: msg.Cc,
+ Bcc: msg.Bcc,
+ Subject: &msg.Subject,
+ }
+ if msg.ReplyTo != "" {
+ req.ReplyTo = &msg.ReplyTo
+ }
+ if msg.Body != "" {
+ req.TextBody = &msg.Body
+ }
+ if msg.HTMLBody != "" {
+ req.HtmlBody = &msg.HTMLBody
+ }
+ if len(msg.Attachments) > 0 {
+ req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments))
+ for i, att := range msg.Attachments {
+ req.Attachment[i] = &pb.MailAttachment{
+ FileName: proto.String(att.Name),
+ Data: att.Data,
+ }
+ if att.ContentID != "" {
+ req.Attachment[i].ContentID = proto.String(att.ContentID)
+ }
+ }
+ }
+ for key, vs := range msg.Headers {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.MailHeader{
+ Name: proto.String(key),
+ Value: proto.String(v),
+ })
+ }
+ }
+ res := &bpb.VoidProto{}
+ if err := internal.Call(c, "mail", method, req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name)
+}
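Putting the Message fields above together: Sender is required, at least one of To, Cc or Bcc must be non-empty (except for SendToAdmins), and attachments and extra headers are optional, with the accepted header names governed by the documentation link in the comment above. A sketch with made-up addresses (netmail aliases the standard net/mail package, and csvBytes is a placeholder):

    msg := &mail.Message{
        Sender:   "reports@example.com",
        ReplyTo:  "support@example.com",
        To:       []string{"user@example.com"},
        Subject:  "Weekly report",
        Body:     "Plain-text version of the report.",
        HTMLBody: "<p>HTML version of the report.</p>",
        Attachments: []mail.Attachment{
            {Name: "report.csv", Data: csvBytes, ContentID: "<report>"},
        },
        Headers: netmail.Header{"List-Id": []string{"reports.example.com"}},
    }
    if err := mail.Send(ctx, msg); err != nil {
        log.Errorf(ctx, "sending report: %v", err)
    }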
diff --git a/vendor/google.golang.org/appengine/mail/mail_test.go b/vendor/google.golang.org/appengine/mail/mail_test.go
new file mode 100644
index 000000000..7502c5973
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail_test.go
@@ -0,0 +1,65 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package mail
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+func TestMessageConstruction(t *testing.T) {
+ var got *pb.MailMessage
+ c := aetesting.FakeSingleContext(t, "mail", "Send", func(in *pb.MailMessage, out *basepb.VoidProto) error {
+ got = in
+ return nil
+ })
+
+ msg := &Message{
+ Sender: "dsymonds@example.com",
+ To: []string{"nigeltao@example.com"},
+ Body: "Hey, lunch time?",
+ Attachments: []Attachment{
+ // Regression test for a prod bug. The address of a range variable was used when
+ // constructing the outgoing proto, so multiple attachments used the same name.
+ {
+ Name: "att1.txt",
+ Data: []byte("data1"),
+ ContentID: "<att1>",
+ },
+ {
+ Name: "att2.txt",
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if err := Send(c, msg); err != nil {
+ t.Fatalf("Send: %v", err)
+ }
+ want := &pb.MailMessage{
+ Sender: proto.String("dsymonds@example.com"),
+ To: []string{"nigeltao@example.com"},
+ Subject: proto.String(""),
+ TextBody: proto.String("Hey, lunch time?"),
+ Attachment: []*pb.MailAttachment{
+ {
+ FileName: proto.String("att1.txt"),
+ Data: []byte("data1"),
+ ContentID: proto.String("<att1>"),
+ },
+ {
+ FileName: proto.String("att2.txt"),
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("Bad proto for %+v\n got %v\nwant %v", msg, got, want)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go
new file mode 100644
index 000000000..d8eed4be7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache.go
@@ -0,0 +1,526 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package memcache provides a client for App Engine's distributed in-memory
+// key-value store for small chunks of arbitrary data.
+//
+// The fundamental operations get and set items, keyed by a string.
+//
+// item0, err := memcache.Get(c, "key")
+// if err != nil && err != memcache.ErrCacheMiss {
+// return err
+// }
+// if err == nil {
+// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value)
+// } else {
+// fmt.Fprintf(w, "memcache miss\n")
+// }
+//
+// and
+//
+// item1 := &memcache.Item{
+// Key: "foo",
+// Value: []byte("bar"),
+// }
+// if err := memcache.Set(c, item1); err != nil {
+// return err
+// }
+package memcache // import "google.golang.org/appengine/memcache"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var (
+ // ErrCacheMiss means that an operation failed
+ // because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+ // ErrServerError means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+)
+
+// Item is the unit of memcache gets and sets.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+ // Value is the Item's value.
+ Value []byte
+ // Object is the Item's value for use with a Codec.
+ Object interface{}
+ // Flags are server-opaque flags whose semantics are entirely up to the
+ // App Engine app.
+ Flags uint32
+ // Expiration is the maximum duration that the item will stay
+ // in the cache.
+ // The zero value means the Item has no expiration time.
+ // Subsecond precision is ignored.
+ // This is not set when getting items.
+ Expiration time.Duration
+ // casID is a client-opaque value used for compare-and-swap operations.
+ // Zero means that compare-and-swap is not used.
+ casID uint64
+}
+
+const (
+ secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code
+ thirtyYears = time.Duration(secondsIn30Years) * time.Second
+)
+
+// protoToItem converts a protocol buffer item to a Go struct.
+func protoToItem(p *pb.MemcacheGetResponse_Item) *Item {
+ return &Item{
+ Key: string(p.Key),
+ Value: p.Value,
+ Flags: p.GetFlags(),
+ casID: p.GetCasId(),
+ }
+}
+
+// If err is an appengine.MultiError, return its first element. Otherwise, return err.
+func singleError(err error) error {
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a memcache
+// cache miss. The key must be at most 250 bytes in length.
+func Get(c context.Context, key string) (*Item, error) {
+ m, err := GetMulti(c, []string{key})
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := m[key]; !ok {
+ return nil, ErrCacheMiss
+ }
+ return m[key], nil
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to items may
+// have fewer elements than the input slice, due to memcache cache misses.
+// Each key must be at most 250 bytes in length.
+func GetMulti(c context.Context, key []string) (map[string]*Item, error) {
+ if len(key) == 0 {
+ return nil, nil
+ }
+ keyAsBytes := make([][]byte, len(key))
+ for i, k := range key {
+ keyAsBytes[i] = []byte(k)
+ }
+ req := &pb.MemcacheGetRequest{
+ Key: keyAsBytes,
+ ForCas: proto.Bool(true),
+ }
+ res := &pb.MemcacheGetResponse{}
+ if err := internal.Call(c, "memcache", "Get", req, res); err != nil {
+ return nil, err
+ }
+ m := make(map[string]*Item, len(res.Item))
+ for _, p := range res.Item {
+ t := protoToItem(p)
+ m[t.Key] = t
+ }
+ return m, nil
+}
+
+// Delete deletes the item for the given key.
+// ErrCacheMiss is returned if the specified item can not be found.
+// The key must be at most 250 bytes in length.
+func Delete(c context.Context, key string) error {
+ return singleError(DeleteMulti(c, []string{key}))
+}
+
+// DeleteMulti is a batch version of Delete.
+// If any keys cannot be found, an appengine.MultiError is returned.
+// Each key must be at most 250 bytes in length.
+func DeleteMulti(c context.Context, key []string) error {
+ if len(key) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheDeleteRequest{
+ Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)),
+ }
+ for i, k := range key {
+ req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}
+ }
+ res := &pb.MemcacheDeleteResponse{}
+ if err := internal.Call(c, "memcache", "Delete", req, res); err != nil {
+ return err
+ }
+ if len(res.DeleteStatus) != len(key) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(key)), false
+ for i, s := range res.DeleteStatus {
+ switch s {
+ case pb.MemcacheDeleteResponse_DELETED:
+ // OK
+ case pb.MemcacheDeleteResponse_NOT_FOUND:
+ me[i] = ErrCacheMiss
+ any = true
+ default:
+ me[i] = ErrServerError
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Increment atomically increments the decimal value in the given key
+// by delta and returns the new value. The value must fit in a uint64.
+// Overflow wraps around, and underflow is capped to zero. The
+// provided delta may be negative. If the key doesn't exist in
+// memcache, the provided initial value is used to atomically
+// populate it before the delta is applied.
+// The key must be at most 250 bytes in length.
+func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
+ return incr(c, key, delta, &initialValue)
+}
+
+// IncrementExisting works like Increment but assumes that the key
+// already exists in memcache and doesn't take an initial value.
+// IncrementExisting can save work if calculating the initial value is
+// expensive.
+// An error is returned if the specified item can not be found.
+func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) {
+ return incr(c, key, delta, nil)
+}
+
+func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {
+ req := &pb.MemcacheIncrementRequest{
+ Key: []byte(key),
+ InitialValue: initialValue,
+ }
+ if delta >= 0 {
+ req.Delta = proto.Uint64(uint64(delta))
+ } else {
+ req.Delta = proto.Uint64(uint64(-delta))
+ req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()
+ }
+ res := &pb.MemcacheIncrementResponse{}
+ err = internal.Call(c, "memcache", "Increment", req, res)
+ if err != nil {
+ return
+ }
+ if res.NewValue == nil {
+ return 0, ErrCacheMiss
+ }
+ return *res.NewValue, nil
+}
+
+// set sets the given items using the given conflict resolution policy.
+// appengine.MultiError may be returned.
+func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {
+ if len(item) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheSetRequest{
+ Item: make([]*pb.MemcacheSetRequest_Item, len(item)),
+ }
+ for i, t := range item {
+ p := &pb.MemcacheSetRequest_Item{
+ Key: []byte(t.Key),
+ }
+ if value == nil {
+ p.Value = t.Value
+ } else {
+ p.Value = value[i]
+ }
+ if t.Flags != 0 {
+ p.Flags = proto.Uint32(t.Flags)
+ }
+ if t.Expiration != 0 {
+ // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)
+ // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).
+ // Throughout this .go file, we use int32.
+ // Also, in the proto, the expiration value is either a duration (in seconds)
+ // or an absolute Unix timestamp (in seconds), depending on whether the
+ // value is less than or greater than or equal to 30 years, respectively.
+ if t.Expiration < time.Second {
+ // Because an Expiration of 0 means no expiration, we take
+ // care here to translate an item with an expiration
+ // Duration between 0-1 seconds as immediately expiring
+ // (saying it expired a few seconds ago), rather than
+ // rounding it down to 0 and making it live forever.
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)
+ } else if t.Expiration >= thirtyYears {
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))
+ } else {
+ p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))
+ }
+ }
+ if t.casID != 0 {
+ p.CasId = proto.Uint64(t.casID)
+ p.ForCas = proto.Bool(true)
+ }
+ p.SetPolicy = policy.Enum()
+ req.Item[i] = p
+ }
+ res := &pb.MemcacheSetResponse{}
+ if err := internal.Call(c, "memcache", "Set", req, res); err != nil {
+ return err
+ }
+ if len(res.SetStatus) != len(item) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(item)), false
+ for i, st := range res.SetStatus {
+ var err error
+ switch st {
+ case pb.MemcacheSetResponse_STORED:
+ // OK
+ case pb.MemcacheSetResponse_NOT_STORED:
+ err = ErrNotStored
+ case pb.MemcacheSetResponse_EXISTS:
+ err = ErrCASConflict
+ default:
+ err = ErrServerError
+ }
+ if err != nil {
+ me[i] = err
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Set writes the given item, unconditionally.
+func Set(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func SetMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func Add(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func AddMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func CompareAndSwapMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_CAS)
+}
+
+// Codec represents a symmetric pair of functions that implement a codec.
+// Items stored into or retrieved from memcache using a Codec have their
+// values marshaled or unmarshaled.
+//
+// All the methods provided for Codec behave analogously to the package-level
+// function with the same name.
+type Codec struct {
+ Marshal func(interface{}) ([]byte, error)
+ Unmarshal func([]byte, interface{}) error
+}
+
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a memcache cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {
+ i, err := Get(c, key)
+ if err != nil {
+ return nil, err
+ }
+ if err := cd.Unmarshal(i.Value, v); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+ var vs [][]byte
+ var me appengine.MultiError
+ for i, item := range items {
+ v, err := cd.Marshal(item.Object)
+ if err != nil {
+ if me == nil {
+ me = make(appengine.MultiError, len(items))
+ }
+ me[i] = err
+ continue
+ }
+ if me == nil {
+ vs = append(vs, v)
+ }
+ }
+ if me != nil {
+ return me
+ }
+
+ return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+ // Gob is a Codec that uses the gob package.
+ Gob = Codec{gobMarshal, gobUnmarshal}
+ // JSON is a Codec that uses the json package.
+ JSON = Codec{json.Marshal, json.Unmarshal}
+)
+
+func gobMarshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func gobUnmarshal(data []byte, v interface{}) error {
+ return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
+
+// Statistics represents a set of statistics about the memcache cache.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+ Hits uint64 // Counter of cache hits
+ Misses uint64 // Counter of cache misses
+ ByteHits uint64 // Counter of bytes transferred for gets
+
+ Items uint64 // Items currently in the cache
+ Bytes uint64 // Size of all items currently in the cache
+
+ Oldest int64 // Age of access of the oldest item, in seconds
+}
+
+// Stats retrieves the current memcache statistics.
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.MemcacheStatsRequest{}
+ res := &pb.MemcacheStatsResponse{}
+ if err := internal.Call(c, "memcache", "Stats", req, res); err != nil {
+ return nil, err
+ }
+ if res.Stats == nil {
+ return nil, ErrNoStats
+ }
+ return &Statistics{
+ Hits: *res.Stats.Hits,
+ Misses: *res.Stats.Misses,
+ ByteHits: *res.Stats.ByteHits,
+ Items: *res.Stats.Items,
+ Bytes: *res.Stats.Bytes,
+ Oldest: int64(*res.Stats.OldestItemAge),
+ }, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c context.Context) error {
+ req := &pb.MemcacheFlushRequest{}
+ res := &pb.MemcacheFlushResponse{}
+ return internal.Call(c, "memcache", "FlushAll", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ switch m := m.(type) {
+ case *pb.MemcacheDeleteRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheGetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheIncrementRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheSetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+ internal.NamespaceMods["memcache"] = namespaceMod
+}
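The CompareAndSwap comments above describe an optimistic pattern: Get an item (which records its casID), modify the value, then CompareAndSwap, retrying on ErrCASConflict and recreating the item on ErrNotStored (eviction). A hedged sketch of that loop for a counter stored as a decimal string (the key and helper name are illustrative; for a plain counter, the Increment call above is simpler and fully server-side):

    func incrementCounterCAS(ctx context.Context, key string) error {
        for {
            item, err := memcache.Get(ctx, key)
            if err == memcache.ErrCacheMiss {
                // Nothing cached yet; try to create it.
                err = memcache.Add(ctx, &memcache.Item{Key: key, Value: []byte("1")})
                if err == memcache.ErrNotStored {
                    continue // created concurrently; retry the Get
                }
                return err
            }
            if err != nil {
                return err
            }
            n, _ := strconv.Atoi(string(item.Value))
            item.Value = []byte(strconv.Itoa(n + 1))
            switch err := memcache.CompareAndSwap(ctx, item); err {
            case nil:
                return nil
            case memcache.ErrCASConflict, memcache.ErrNotStored:
                continue // modified or evicted since the Get; retry
            default:
                return err
            }
        }
    }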
diff --git a/vendor/google.golang.org/appengine/memcache/memcache_test.go b/vendor/google.golang.org/appengine/memcache/memcache_test.go
new file mode 100644
index 000000000..1dc7da471
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache_test.go
@@ -0,0 +1,263 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package memcache
+
+import (
+ "fmt"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var errRPC = fmt.Errorf("RPC error")
+
+func TestGetRequest(t *testing.T) {
+ serviceCalled := false
+ apiKey := "lyric"
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+ // Test request.
+ if n := len(req.Key); n != 1 {
+ t.Errorf("got %d want 1", n)
+ return nil
+ }
+ if k := string(req.Key[0]); k != apiKey {
+ t.Errorf("got %q want %q", k, apiKey)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ // Test the "forward" path from the API call parameters to the
+ // protobuf request object. (The "backward" path from the
+ // protobuf response object to the API call response,
+ // including the error response, is handled in the next few
+ // tests).
+ Get(c, apiKey)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestGetResponseHit(t *testing.T) {
+ key := "lyric"
+ value := "Where the buffalo roam"
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ res.Item = []*pb.MemcacheGetResponse_Item{
+ {Key: []byte(key), Value: []byte(value)},
+ }
+ return nil
+ })
+ apiItem, err := Get(c, key)
+ if apiItem == nil || apiItem.Key != key || string(apiItem.Value) != value {
+ t.Errorf("got %q, %q want {%q,%q}, nil", apiItem, err, key, value)
+ }
+}
+
+func TestGetResponseMiss(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ // don't fill in any of the response
+ return nil
+ })
+ _, err := Get(c, "something")
+ if err != ErrCacheMiss {
+ t.Errorf("got %v want ErrCacheMiss", err)
+ }
+}
+
+func TestGetResponseRPCError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ return errRPC
+ })
+
+ if _, err := Get(c, "something"); err != errRPC {
+ t.Errorf("got %v want errRPC", err)
+ }
+}
+
+func TestAddRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Oh, give me a home"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(apiItem.Value); v != string(pbItem.Value) {
+ t.Errorf("got %q want %q", v, string(pbItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_ADD {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_ADD)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Add(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestAddResponseStored(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+}
+
+func TestAddResponseNotStored(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_NOT_STORED}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != ErrNotStored {
+ t.Errorf("got %v want ErrNotStored", err)
+ }
+}
+
+func TestAddResponseError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != ErrServerError {
+ t.Errorf("got %v want ErrServerError", err)
+ }
+}
+
+func TestAddResponseRPCError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ return errRPC
+ })
+
+ if err := Add(c, &Item{}); err != errRPC {
+ t.Errorf("got %v want errRPC", err)
+ }
+}
+
+func TestSetRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Where the buffalo roam"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ if n := len(req.Item); n != 1 {
+ t.Errorf("got %d want 1", n)
+ return nil
+ }
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(pbItem.Value); v != string(apiItem.Value) {
+ t.Errorf("got %q want %q", v, string(apiItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_SET {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_SET)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Set(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestSetResponse(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
+ return nil
+ })
+
+ if err := Set(c, &Item{}); err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+}
+
+func TestSetResponseError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
+ return nil
+ })
+
+ if err := Set(c, &Item{}); err != ErrServerError {
+ t.Errorf("got %v want ErrServerError", err)
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ namec <- req.NameSpace
+ return errRPC
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ Get(c0, "key")
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Get with c0: ns = %q, want nil`, *ns)
+ }
+
+ Get(c1, "key")
+ if ns := <-namec; ns == nil {
+ t.Error(`Get with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`Get with c1: ns = %q, want "A"`, *ns)
+ }
+
+ Get(c2, "key")
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Get with c2: ns = %q, want nil`, *ns)
+ }
+}
+
+func TestGetMultiEmpty(t *testing.T) {
+ serviceCalled := false
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+ serviceCalled = true
+ return nil
+ })
+
+ // Test that the Memcache service is not called when
+ // GetMulti is passed an empty slice of keys.
+ GetMulti(c, []string{})
+ if serviceCalled {
+ t.Error("Service was called but should not have been")
+ }
+}
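The tests above exercise the public memcache API (Get, Add, Set, GetMulti and the ErrCacheMiss/ErrNotStored sentinels). As a rough illustration only, not part of the vendored patch, a request handler might use that API as in the sketch below; the handler name, key and value are invented.

package app // hypothetical package using the vendored memcache client

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/memcache"
)

func lyricHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// ErrCacheMiss distinguishes "key absent" from real failures.
	item, err := memcache.Get(ctx, "lyric")
	switch err {
	case memcache.ErrCacheMiss:
		item = &memcache.Item{Key: "lyric", Value: []byte("Oh, give me a home")}
		// Add stores the item only if the key is not already present.
		if err := memcache.Add(ctx, item); err != nil && err != memcache.ErrNotStored {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	case nil:
		// Cache hit; use the item as returned.
	default:
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "%s\n", item.Value)
}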
diff --git a/vendor/google.golang.org/appengine/module/module.go b/vendor/google.golang.org/appengine/module/module.go
new file mode 100644
index 000000000..88e6629ac
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module.go
@@ -0,0 +1,113 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package module provides functions for interacting with modules.
+
+The appengine package contains functions that report the identity of the app,
+including the module name.
+*/
+package module // import "google.golang.org/appengine/module"
+
+import (
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+// List returns the names of modules belonging to this application.
+func List(c context.Context) ([]string, error) {
+ req := &pb.GetModulesRequest{}
+ res := &pb.GetModulesResponse{}
+ err := internal.Call(c, "modules", "GetModules", req, res)
+ return res.Module, err
+}
+
+// NumInstances returns the number of instances of the given module/version.
+// If either argument is the empty string it means the default.
+func NumInstances(c context.Context, module, version string) (int, error) {
+ req := &pb.GetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.GetNumInstancesResponse{}
+
+ if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil {
+ return 0, err
+ }
+ return int(*res.Instances), nil
+}
+
+// SetNumInstances sets the number of instances of the given module.version to the
+// specified value. If either module or version are the empty string it means the
+// default.
+func SetNumInstances(c context.Context, module, version string, instances int) error {
+ req := &pb.SetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ req.Instances = proto.Int64(int64(instances))
+ res := &pb.SetNumInstancesResponse{}
+ return internal.Call(c, "modules", "SetNumInstances", req, res)
+}
+
+// Versions returns the names of the versions that belong to the specified module.
+// If module is the empty string, it means the default module.
+func Versions(c context.Context, module string) ([]string, error) {
+ req := &pb.GetVersionsRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetVersionsResponse{}
+ err := internal.Call(c, "modules", "GetVersions", req, res)
+ return res.GetVersion(), err
+}
+
+// DefaultVersion returns the default version of the specified module.
+// If module is the empty string, it means the default module.
+func DefaultVersion(c context.Context, module string) (string, error) {
+ req := &pb.GetDefaultVersionRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetDefaultVersionResponse{}
+ err := internal.Call(c, "modules", "GetDefaultVersion", req, res)
+ return res.GetVersion(), err
+}
+
+// Start starts the specified version of the specified module.
+// If either module or version are the empty string, it means the default.
+func Start(c context.Context, module, version string) error {
+ req := &pb.StartModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StartModuleResponse{}
+ return internal.Call(c, "modules", "StartModule", req, res)
+}
+
+// Stop stops the specified version of the specified module.
+// If either module or version are the empty string, it means the default.
+func Stop(c context.Context, module, version string) error {
+ req := &pb.StopModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StopModuleResponse{}
+ return internal.Call(c, "modules", "StopModule", req, res)
+}
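As a minimal sketch of how the functions above fit together (editorial, not part of the patch; the "worker" module and "v1" version names are invented):

package app // hypothetical caller of the module API

import (
	"fmt"

	"golang.org/x/net/context"

	"google.golang.org/appengine/module"
)

// doubleWorkerInstances doubles the instance count of the worker module's v1
// version, using the List/NumInstances/SetNumInstances calls defined above.
func doubleWorkerInstances(ctx context.Context) error {
	mods, err := module.List(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("modules: %v\n", mods) // e.g. [default worker]

	n, err := module.NumInstances(ctx, "worker", "v1")
	if err != nil {
		return err
	}
	return module.SetNumInstances(ctx, "worker", "v1", n*2)
}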
diff --git a/vendor/google.golang.org/appengine/module/module_test.go b/vendor/google.golang.org/appengine/module/module_test.go
new file mode 100644
index 000000000..73e8971dc
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module_test.go
@@ -0,0 +1,124 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package module
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+const version = "test-version"
+const module = "test-module"
+const instances = 3
+
+func TestList(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetModules", func(req *pb.GetModulesRequest, res *pb.GetModulesResponse) error {
+ res.Module = []string{"default", "mod1"}
+ return nil
+ })
+ got, err := List(c)
+ if err != nil {
+ t.Fatalf("List: %v", err)
+ }
+ want := []string{"default", "mod1"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("List = %v, want %v", got, want)
+ }
+}
+
+func TestSetNumInstances(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "SetNumInstances", func(req *pb.SetNumInstancesRequest, res *pb.SetNumInstancesResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ if *req.Instances != instances {
+ t.Errorf("Instances = %v, want %d", req.Instances, instances)
+ }
+ return nil
+ })
+ err := SetNumInstances(c, module, version, instances)
+ if err != nil {
+ t.Fatalf("SetNumInstances: %v", err)
+ }
+}
+
+func TestVersions(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetVersions", func(req *pb.GetVersionsRequest, res *pb.GetVersionsResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ res.Version = []string{"v1", "v2", "v3"}
+ return nil
+ })
+ got, err := Versions(c, module)
+ if err != nil {
+ t.Fatalf("Versions: %v", err)
+ }
+ want := []string{"v1", "v2", "v3"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Versions = %v, want %v", got, want)
+ }
+}
+
+func TestDefaultVersion(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetDefaultVersion", func(req *pb.GetDefaultVersionRequest, res *pb.GetDefaultVersionResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ res.Version = proto.String(version)
+ return nil
+ })
+ got, err := DefaultVersion(c, module)
+ if err != nil {
+ t.Fatalf("DefaultVersion: %v", err)
+ }
+ if got != version {
+ t.Errorf("Version = %v, want %v", got, version)
+ }
+}
+
+func TestStart(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StartModule", func(req *pb.StartModuleRequest, res *pb.StartModuleResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Start(c, module, version)
+ if err != nil {
+ t.Fatalf("Start: %v", err)
+ }
+}
+
+func TestStop(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StopModule", func(req *pb.StopModuleRequest, res *pb.StopModuleResponse) error {
+ version := "test-version"
+ module := "test-module"
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Stop(c, module, version)
+ if err != nil {
+ t.Fatalf("Stop: %v", err)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 000000000..21860ca08
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "fmt"
+ "regexp"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+ if !validNamespace.MatchString(namespace) {
+ return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+ }
+ return internal.NamespacedContext(c, namespace), nil
+}
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
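A short sketch, not part of the patch, of wrapping a context with Namespace, in the same spirit as TestNamespaceResetting in the memcache tests above; the "tenant-a" namespace and the helper name are invented.

package app // hypothetical caller

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/memcache"
)

// tenantGet looks a key up in the "tenant-a" namespace first and falls back
// to the default namespace if the tenant copy is missing.
func tenantGet(ctx context.Context, key string) (*memcache.Item, error) {
	nsCtx, err := appengine.Namespace(ctx, "tenant-a")
	if err != nil {
		return nil, err
	}
	if item, err := memcache.Get(nsCtx, key); err == nil {
		return item, nil
	}
	return memcache.Get(ctx, key)
}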
diff --git a/vendor/google.golang.org/appengine/namespace_test.go b/vendor/google.golang.org/appengine/namespace_test.go
new file mode 100644
index 000000000..847f640bd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestNamespaceValidity(t *testing.T) {
+ testCases := []struct {
+ namespace string
+ ok bool
+ }{
+ // data from Python's namespace_manager_test.py
+ {"", true},
+ {"__a.namespace.123__", true},
+ {"-_A....NAMESPACE-_", true},
+ {"-", true},
+ {".", true},
+ {".-", true},
+
+ {"?", false},
+ {"+", false},
+ {"!", false},
+ {" ", false},
+ }
+ for _, tc := range testCases {
+ _, err := Namespace(context.Background(), tc.namespace)
+ if err == nil && !tc.ok {
+ t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace)
+ } else if err != nil && tc.ok {
+ t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client.go b/vendor/google.golang.org/appengine/remote_api/client.go
new file mode 100644
index 000000000..ce8aab562
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client.go
@@ -0,0 +1,194 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+// This file provides the client for connecting remotely to a user's production
+// application.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+)
+
+// Client is a connection to the production APIs for an application.
+type Client struct {
+ hc *http.Client
+ url string
+ appID string
+}
+
+// NewClient returns a client for the given host. All communication will
+// be performed over SSL unless the host is localhost.
+func NewClient(host string, client *http.Client) (*Client, error) {
+ // Add an appcfg header to outgoing requests.
+ wrapClient := new(http.Client)
+ *wrapClient = *client
+ t := client.Transport
+ if t == nil {
+ t = http.DefaultTransport
+ }
+ wrapClient.Transport = &headerAddingRoundTripper{t}
+
+ url := url.URL{
+ Scheme: "https",
+ Host: host,
+ Path: "/_ah/remote_api",
+ }
+ if host == "localhost" || strings.HasPrefix(host, "localhost:") {
+ url.Scheme = "http"
+ }
+ u := url.String()
+ appID, err := getAppID(wrapClient, u)
+ if err != nil {
+ return nil, fmt.Errorf("unable to contact server: %v", err)
+ }
+ return &Client{
+ hc: wrapClient,
+ url: u,
+ appID: appID,
+ }, nil
+}
+
+// NewContext returns a copy of parent that will cause App Engine API
+// calls to be sent to the client's remote host.
+func (c *Client) NewContext(parent context.Context) context.Context {
+ ctx := internal.WithCallOverride(parent, c.call)
+ ctx = internal.WithLogOverride(ctx, c.logf)
+ ctx = internal.WithAppIDOverride(ctx, c.appID)
+ return ctx
+}
+
+// NewRemoteContext returns a context that gives access to the production
+// APIs for the application at the given host. All communication will be
+// performed over SSL unless the host is localhost.
+func NewRemoteContext(host string, client *http.Client) (context.Context, error) {
+ c, err := NewClient(host, client)
+ if err != nil {
+ return nil, err
+ }
+ return c.NewContext(context.Background()), nil
+}
+
+var logLevels = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func (c *Client) logf(level int64, format string, args ...interface{}) {
+ log.Printf(logLevels[level]+": "+format, args...)
+}
+
+func (c *Client) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ req, err := proto.Marshal(in)
+ if err != nil {
+ return fmt.Errorf("error marshalling request: %v", err)
+ }
+
+ remReq := &pb.Request{
+ ServiceName: proto.String(service),
+ Method: proto.String(method),
+ Request: req,
+ // NOTE(djd): RequestId is unused in the server.
+ }
+
+ req, err = proto.Marshal(remReq)
+ if err != nil {
+ return fmt.Errorf("proto.Marshal: %v", err)
+ }
+
+ // TODO(djd): Respect ctx.Deadline()?
+ resp, err := c.hc.Post(c.url, "application/octet-stream", bytes.NewReader(req))
+ if err != nil {
+ return fmt.Errorf("error sending request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return fmt.Errorf("failed reading response: %v", err)
+ }
+ remResp := &pb.Response{}
+ if err := proto.Unmarshal(body, remResp); err != nil {
+ return fmt.Errorf("error unmarshalling response: %v", err)
+ }
+
+ if ae := remResp.GetApplicationError(); ae != nil {
+ return &internal.APIError{
+ Code: ae.GetCode(),
+ Detail: ae.GetDetail(),
+ Service: service,
+ }
+ }
+
+ if remResp.Response == nil {
+ return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp))
+ }
+
+ return proto.Unmarshal(remResp.Response, out)
+}
+
+// This is a forgiving regexp designed to parse the app ID from YAML.
+var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`)
+
+func getAppID(client *http.Client, url string) (string, error) {
+ // Generate a pseudo-random token for handshaking.
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
+ resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed reading response: %v", err)
+ }
+
+ // Check the token is present in response.
+ if !bytes.Contains(body, []byte(token)) {
+ return "", fmt.Errorf("token not found: want %q; body %q", token, body)
+ }
+
+ match := appIDRE.FindSubmatch(body)
+ if match == nil {
+ return "", fmt.Errorf("app ID not found: body %q", body)
+ }
+
+ return string(match[1]), nil
+}
+
+type headerAddingRoundTripper struct {
+ Wrapped http.RoundTripper
+}
+
+func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ r.Header.Set("X-Appcfg-Api-Version", "1")
+ return t.Wrapped.RoundTrip(r)
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client_test.go b/vendor/google.golang.org/appengine/remote_api/client_test.go
new file mode 100644
index 000000000..7f4bdcf3c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client_test.go
@@ -0,0 +1,43 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+import (
+ "log"
+ "net/http"
+ "testing"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/datastore"
+)
+
+func TestAppIDRE(t *testing.T) {
+ appID := "s~my-appid-539"
+ tests := []string{
+ "{rtok: 8306111115908860449, app_id: s~my-appid-539}\n",
+ "{rtok: 8306111115908860449, app_id: 's~my-appid-539'}\n",
+ `{rtok: 8306111115908860449, app_id: "s~my-appid-539"}`,
+ `{rtok: 8306111115908860449, "app_id":"s~my-appid-539"}`,
+ }
+ for _, v := range tests {
+ if g := appIDRE.FindStringSubmatch(v); g == nil || g[1] != appID {
+ t.Errorf("appIDRE.FindStringSubmatch(%s) got %q, want %q", v, g, appID)
+ }
+ }
+}
+
+func ExampleClient() {
+ c, err := NewClient("example.appspot.com", http.DefaultClient)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ ctx := context.Background() // or from a request
+ ctx = c.NewContext(ctx)
+ _, err = datastore.Put(ctx, datastore.NewIncompleteKey(ctx, "Foo", nil), struct{ Bar int }{42})
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/remote_api.go b/vendor/google.golang.org/appengine/remote_api/remote_api.go
new file mode 100644
index 000000000..3d2880d64
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/remote_api.go
@@ -0,0 +1,152 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package remote_api implements the /_ah/remote_api endpoint.
+This endpoint is used by offline tools such as the bulk loader.
+*/
+package remote_api // import "google.golang.org/appengine/remote_api"
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+func init() {
+ http.HandleFunc("/_ah/remote_api", handle)
+}
+
+func handle(w http.ResponseWriter, req *http.Request) {
+ c := appengine.NewContext(req)
+
+ u := user.Current(c)
+ if u == nil {
+ u, _ = user.CurrentOAuth(c,
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/appengine.apis",
+ )
+ }
+
+ if !appengine.IsDevAppServer() && (u == nil || !u.Admin) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusUnauthorized)
+ io.WriteString(w, "You must be logged in as an administrator to access this.\n")
+ return
+ }
+ if req.Header.Get("X-Appcfg-Api-Version") == "" {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusForbidden)
+ io.WriteString(w, "This request did not contain a necessary header.\n")
+ return
+ }
+
+ if req.Method != "POST" {
+ // Response must be YAML.
+ rtok := req.FormValue("rtok")
+ if rtok == "" {
+ rtok = "0"
+ }
+ w.Header().Set("Content-Type", "text/yaml; charset=utf-8")
+ fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok)
+ return
+ }
+
+ defer req.Body.Close()
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Failed reading body: %v", err)
+ return
+ }
+ remReq := &pb.Request{}
+ if err := proto.Unmarshal(body, remReq); err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Bad body: %v", err)
+ return
+ }
+
+ service, method := *remReq.ServiceName, *remReq.Method
+ if !requestSupported(service, method) {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Unsupported RPC /%s.%s", service, method)
+ return
+ }
+
+ rawReq := &rawMessage{remReq.Request}
+ rawRes := &rawMessage{}
+ err = internal.Call(c, service, method, rawReq, rawRes)
+
+ remRes := &pb.Response{}
+ if err == nil {
+ remRes.Response = rawRes.buf
+ } else if ae, ok := err.(*internal.APIError); ok {
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: &ae.Code,
+ Detail: &ae.Detail,
+ }
+ } else {
+ // This shouldn't normally happen.
+ log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err)
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: proto.Int32(0),
+ Detail: proto.String(err.Error()),
+ }
+ }
+ out, err := proto.Marshal(remRes)
+ if err != nil {
+ // This should not be possible.
+ w.WriteHeader(500)
+ log.Errorf(c, "proto.Marshal: %v", err)
+ return
+ }
+
+ log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method)
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(len(out)))
+ w.Write(out)
+}
+
+// rawMessage is a protocol buffer type that is already serialised.
+// This allows the remote_api code here to handle messages
+// without having to know the real type.
+type rawMessage struct {
+ buf []byte
+}
+
+func (rm *rawMessage) Marshal() ([]byte, error) {
+ return rm.buf, nil
+}
+
+func (rm *rawMessage) Unmarshal(buf []byte) error {
+ rm.buf = make([]byte, len(buf))
+ copy(rm.buf, buf)
+ return nil
+}
+
+func requestSupported(service, method string) bool {
+ // This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py
+ switch service {
+ case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3",
+ "datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore",
+ "remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp":
+ return true
+ }
+ return false
+}
+
+// Methods to satisfy proto.Message.
+func (rm *rawMessage) Reset() { rm.buf = nil }
+func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }
+func (*rawMessage) ProtoMessage() {}
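Because the init function above registers the /_ah/remote_api handler as a side effect, a classic standard-environment app only needs a blank import to expose the endpoint. A rough sketch, not part of the patch:

package app // hypothetical classic App Engine app

import (
	"fmt"
	"net/http"

	_ "google.golang.org/appengine/remote_api" // registers /_ah/remote_api via init
)

func init() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
}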
diff --git a/vendor/google.golang.org/appengine/runtime/runtime.go b/vendor/google.golang.org/appengine/runtime/runtime.go
new file mode 100644
index 000000000..fa6c12b79
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime.go
@@ -0,0 +1,148 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package runtime exposes information about the resource usage of the application.
+It also provides a way to run code in a new background context of a module.
+
+This package does not work on App Engine "flexible environment".
+*/
+package runtime // import "google.golang.org/appengine/runtime"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/system"
+)
+
+// Statistics represents the system's statistics.
+type Statistics struct {
+ // CPU records the CPU consumed by this instance, in megacycles.
+ CPU struct {
+ Total float64
+ Rate1M float64 // consumption rate over one minute
+ Rate10M float64 // consumption rate over ten minutes
+ }
+ // RAM records the memory used by the instance, in megabytes.
+ RAM struct {
+ Current float64
+ Average1M float64 // average usage over one minute
+ Average10M float64 // average usage over ten minutes
+ }
+}
+
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.GetSystemStatsRequest{}
+ res := &pb.GetSystemStatsResponse{}
+ if err := internal.Call(c, "system", "GetSystemStats", req, res); err != nil {
+ return nil, err
+ }
+ s := &Statistics{}
+ if res.Cpu != nil {
+ s.CPU.Total = res.Cpu.GetTotal()
+ s.CPU.Rate1M = res.Cpu.GetRate1M()
+ s.CPU.Rate10M = res.Cpu.GetRate10M()
+ }
+ if res.Memory != nil {
+ s.RAM.Current = res.Memory.GetCurrent()
+ s.RAM.Average1M = res.Memory.GetAverage1M()
+ s.RAM.Average10M = res.Memory.GetAverage10M()
+ }
+ return s, nil
+}
+
+/*
+RunInBackground makes an API call that triggers an /_ah/background request.
+
+There are two independent code paths that need to make contact:
+the RunInBackground code, and the /_ah/background handler. The matchmaker
+loop arranges for the two paths to meet. The RunInBackground code passes
+a send to the matchmaker, the /_ah/background handler passes a recv to the matchmaker,
+and the matchmaker hooks them up.
+*/
+
+func init() {
+ http.HandleFunc("/_ah/background", handleBackground)
+
+ sc := make(chan send)
+ rc := make(chan recv)
+ sendc, recvc = sc, rc
+ go matchmaker(sc, rc)
+}
+
+var (
+ sendc chan<- send // RunInBackground sends to this
+ recvc chan<- recv // handleBackground sends to this
+)
+
+type send struct {
+ id string
+ f func(context.Context)
+}
+
+type recv struct {
+ id string
+ ch chan<- func(context.Context)
+}
+
+func matchmaker(sendc <-chan send, recvc <-chan recv) {
+ // When one side of the match arrives before the other
+ // it is inserted in the corresponding map.
+ waitSend := make(map[string]send)
+ waitRecv := make(map[string]recv)
+
+ for {
+ select {
+ case s := <-sendc:
+ if r, ok := waitRecv[s.id]; ok {
+ // meet!
+ delete(waitRecv, s.id)
+ r.ch <- s.f
+ } else {
+ // waiting for r
+ waitSend[s.id] = s
+ }
+ case r := <-recvc:
+ if s, ok := waitSend[r.id]; ok {
+ // meet!
+ delete(waitSend, r.id)
+ r.ch <- s.f
+ } else {
+ // waiting for s
+ waitRecv[r.id] = r
+ }
+ }
+ }
+}
+
+var newContext = appengine.NewContext // for testing
+
+func handleBackground(w http.ResponseWriter, req *http.Request) {
+ id := req.Header.Get("X-AppEngine-BackgroundRequest")
+
+ ch := make(chan func(context.Context))
+ recvc <- recv{id, ch}
+ (<-ch)(newContext(req))
+}
+
+// RunInBackground runs f in a background goroutine in this process.
+// f is provided a context that may outlast the context provided to RunInBackground.
+// This is only valid to invoke from a service set to basic or manual scaling.
+func RunInBackground(c context.Context, f func(c context.Context)) error {
+ req := &pb.StartBackgroundRequestRequest{}
+ res := &pb.StartBackgroundRequestResponse{}
+ if err := internal.Call(c, "system", "StartBackgroundRequest", req, res); err != nil {
+ return err
+ }
+ sendc <- send{res.GetRequestId(), f}
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("system", pb.SystemServiceError_ErrorCode_name)
+}
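A rough usage sketch of Stats and RunInBackground (editorial, not part of the patch); the handler name is invented, and as the doc comment above notes, RunInBackground is only valid on basic or manual scaling services.

package app // hypothetical

import (
	"net/http"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
	"google.golang.org/appengine/runtime"
)

func startCleanup(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	err := runtime.RunInBackground(ctx, func(ctx context.Context) {
		// This context comes from the /_ah/background request and may
		// outlive the request that called startCleanup.
		if s, err := runtime.Stats(ctx); err == nil {
			log.Infof(ctx, "RAM in use: %.1f MB", s.RAM.Current)
		}
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}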
diff --git a/vendor/google.golang.org/appengine/runtime/runtime_test.go b/vendor/google.golang.org/appengine/runtime/runtime_test.go
new file mode 100644
index 000000000..8f3a124d2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime_test.go
@@ -0,0 +1,101 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/system"
+)
+
+func TestRunInBackgroundSendFirst(t *testing.T) { testRunInBackground(t, true) }
+func TestRunInBackgroundRecvFirst(t *testing.T) { testRunInBackground(t, false) }
+
+func testRunInBackground(t *testing.T, sendFirst bool) {
+ srv := httptest.NewServer(nil)
+ defer srv.Close()
+
+ const id = "f00bar"
+ sendWait, recvWait := make(chan bool), make(chan bool)
+ sbr := make(chan bool) // strobed when system.StartBackgroundRequest has started
+
+ calls := 0
+ c := aetesting.FakeSingleContext(t, "system", "StartBackgroundRequest", func(req *pb.StartBackgroundRequestRequest, res *pb.StartBackgroundRequestResponse) error {
+ calls++
+ if calls > 1 {
+ t.Errorf("Too many calls to system.StartBackgroundRequest")
+ }
+ sbr <- true
+ res.RequestId = proto.String(id)
+ <-sendWait
+ return nil
+ })
+
+ var c2 context.Context // a fake
+ newContext = func(*http.Request) context.Context {
+ return c2
+ }
+
+ var fRun int
+ f := func(c3 context.Context) {
+ fRun++
+ if c3 != c2 {
+ t.Errorf("f got a different context than expected")
+ }
+ }
+
+ ribErrc := make(chan error)
+ go func() {
+ ribErrc <- RunInBackground(c, f)
+ }()
+
+ brErrc := make(chan error)
+ go func() {
+ <-sbr
+ req, err := http.NewRequest("GET", srv.URL+"/_ah/background", nil)
+ if err != nil {
+ brErrc <- fmt.Errorf("http.NewRequest: %v", err)
+ return
+ }
+ req.Header.Set("X-AppEngine-BackgroundRequest", id)
+ client := &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ },
+ }
+
+ <-recvWait
+ _, err = client.Do(req)
+ brErrc <- err
+ }()
+
+ // Send and receive are both waiting at this point.
+ waits := [2]chan bool{sendWait, recvWait}
+ if !sendFirst {
+ waits[0], waits[1] = waits[1], waits[0]
+ }
+ waits[0] <- true
+ time.Sleep(100 * time.Millisecond)
+ waits[1] <- true
+
+ if err := <-ribErrc; err != nil {
+ t.Fatalf("RunInBackground: %v", err)
+ }
+ if err := <-brErrc; err != nil {
+ t.Fatalf("background request: %v", err)
+ }
+
+ if fRun != 1 {
+ t.Errorf("Got %d runs of f, want 1", fRun)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/search/doc.go b/vendor/google.golang.org/appengine/search/doc.go
new file mode 100644
index 000000000..5208f18f6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/doc.go
@@ -0,0 +1,209 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package search provides a client for App Engine's search service.
+
+
+Basic Operations
+
+Indexes contain documents. Each index is identified by its name: a
+human-readable ASCII string.
+
+Within an index, documents are associated with an ID, which is also
+a human-readable ASCII string. A document's contents are a mapping from
+case-sensitive field names to values. Valid types for field values are:
+ - string,
+ - search.Atom,
+ - search.HTML,
+ - time.Time (stored with millisecond precision),
+ - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),
+ - appengine.GeoPoint.
+
+The Get and Put methods on an Index load and save a document.
+A document's contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Doc struct {
+ Author string
+ Comment string
+ Creation time.Time
+ }
+
+ index, err := search.Open("comments")
+ if err != nil {
+ return err
+ }
+ newID, err := index.Put(ctx, "", &Doc{
+ Author: "gopher",
+ Comment: "the truth of the matter",
+ Creation: time.Now(),
+ })
+ if err != nil {
+ return err
+ }
+
+A single document can be retrieved by its ID. Pass a destination struct
+to Get to hold the resulting document.
+
+ var doc Doc
+ err := index.Get(ctx, id, &doc)
+ if err != nil {
+ return err
+ }
+
+
+Search and Listing Documents
+
+Indexes have two methods for retrieving multiple documents at once: Search and
+List.
+
+Searching an index for a query will result in an iterator. As with an iterator
+from package datastore, pass a destination struct to Next to decode the next
+result. Next will return Done when the iterator is exhausted.
+
+ for t := index.Search(ctx, "Comment:truth", nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+Search takes a string query to determine which documents to return. The query
+can be simple, such as a single word to match, or complex. The query
+language is described at
+https://cloud.google.com/appengine/docs/standard/go/search/query_strings
+
+Search also takes an optional SearchOptions struct which gives much more
+control over how results are calculated and returned.
+
+Call List to iterate over all documents in an index.
+
+ for t := index.List(ctx, nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+
+Fields and Facets
+
+A document's contents can be represented by a variety of types. These are
+typically struct pointers, but they can also be represented by any type
+implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata
+to be set for the document with the DocumentMetadata type. Struct pointers are
+more strongly typed and are easier to use; FieldLoadSavers are more flexible.
+
+A document's contents can be expressed in two ways: fields and facets.
+
+Fields are the most common way of providing content for documents. Fields can
+store data in multiple types and can be matched in searches using query
+strings.
+
+Facets provide a way to attach categorical information to a document. The only
+valid types for facets are search.Atom and float64. Facets allow search
+results to contain summaries of the categories matched in a search, and to
+restrict searches to only match against specific categories.
+
+By default, for struct pointers, all of the struct fields are used as document
+fields, and the field name used is the same as on the struct (and hence must
+start with an upper case letter). Struct fields may have a
+`search:"name,options"` tag. The name must start with a letter and be
+composed only of word characters. A "-" tag name means that the field will be
+ignored. If options is "facet" then the struct field will be used as a
+document facet. If options is "" then the comma may be omitted. There are no
+other recognized options.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and I are facets.
+ // D's tag is equivalent to having no tag at all (E).
+ // F and G are ignored entirely by the search package.
+ // I has tag information for both the search and json packages.
+ type TaggedStruct struct {
+ A float64 `search:"a,facet"`
+ B float64 `search:"b"`
+ C float64 `search:",facet"`
+ D float64 `search:""`
+ E float64
+ F float64 `search:"-"`
+ G float64 `search:"-,facet"`
+ I float64 `search:",facet" json:"i"`
+ }
+
+
+The FieldLoadSaver Interface
+
+A document's contents can also be represented by any type that implements the
+FieldLoadSaver interface. This type may be a struct pointer, but it
+does not have to be. The search package will call Load when loading the
+document's contents, and Save when saving them. In addition to a slice of
+Fields, the Load and Save methods also use the DocumentMetadata type to
+provide additional information about a document (such as its Rank, or set of
+Facets). Possible uses for this interface include deriving non-stored fields,
+verifying fields or setting specific languages for string and HTML fields.
+
+Example code:
+
+ type CustomFieldsExample struct {
+ // Item's title and which language it is in.
+ Title string
+ Lang string
+ // Mass, in grams.
+ Mass int
+ }
+
+ func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {
+ // Load the title field, failing if any other field is found.
+ for _, f := range fields {
+ if f.Name != "title" {
+ return fmt.Errorf("unknown field %q", f.Name)
+ }
+ s, ok := f.Value.(string)
+ if !ok {
+ return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name)
+ }
+ x.Title = s
+ x.Lang = f.Language
+ }
+ // Load the mass facet, failing if any other facet is found.
+ for _, f := range meta.Facets {
+ if f.Name != "mass" {
+ return fmt.Errorf("unknown facet %q", f.Name)
+ }
+ m, ok := f.Value.(float64)
+ if !ok {
+ return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name)
+ }
+ x.Mass = int(m)
+ }
+ return nil
+ }
+
+ func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {
+ fields := []search.Field{
+ {Name: "title", Value: x.Title, Language: x.Lang},
+ }
+ meta := &search.DocumentMetadata{
+ Facets: []search.Facet{
+ {Name: "mass", Value: float64(x.Mass)},
+ },
+ }
+ return fields, meta, nil
+ }
+*/
+package search
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 000000000..707c2d8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+ // Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.
+ Name string
+ // Value is the field value. The valid types are:
+ // - string,
+ // - search.Atom,
+ // - search.HTML,
+ // - time.Time (stored with millisecond precision),
+ // - float64,
+ // - GeoPoint.
+ Value interface{}
+ // Language is a two-letter ISO 639-1 code for the field's language,
+ // defaulting to "en" if nothing is specified. It may only be specified for
+ // fields of type string and search.HTML.
+ Language string
+ // Derived marks fields that were calculated as a result of a
+ // FieldExpression provided to Search. This field is ignored when saving a
+ // document.
+ Derived bool
+}
+
+// Facet is a name/value pair which is used to add categorical information to a
+// document.
+type Facet struct {
+ // Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/.
+ // A facet name cannot be longer than 500 characters.
+ Name string
+ // Value is the facet value.
+ //
+ // When being used in documents (for example, in
+ // DocumentMetadata.Facets), the valid types are:
+ // - search.Atom,
+ // - float64.
+ //
+ // When being used in SearchOptions.Refinements or being returned
+ // in FacetResult, the valid types are:
+ // - search.Atom,
+ // - search.Range.
+ Value interface{}
+}
+
+// DocumentMetadata is a struct containing information describing a given document.
+type DocumentMetadata struct {
+ // Rank is an integer specifying the order the document will be returned in
+ // search results. If zero, the rank will be set to the number of seconds since
+ // 2011-01-01 00:00:00 UTC when being Put into an index.
+ Rank int
+ // Facets is the set of facets for this document.
+ Facets []Facet
+}
+
+// FieldLoadSaver can be converted from and to a slice of Fields
+// with additional document metadata.
+type FieldLoadSaver interface {
+ Load([]Field, *DocumentMetadata) error
+ Save() ([]Field, *DocumentMetadata, error)
+}
+
+// FieldList converts a []Field to implement FieldLoadSaver.
+type FieldList []Field
+
+// Load loads all of the provided fields into l.
+// It does not first reset *l to an empty slice.
+func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
+ *l = append(*l, f...)
+ return nil
+}
+
+// Save returns all of l's fields as a slice of Fields.
+func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
+ return *l, nil, nil
+}
+
+var _ FieldLoadSaver = (*FieldList)(nil)
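A small sketch, not part of the patch, showing FieldList as a schema-less alternative to a struct pointer when loading a document; the index name and helper are invented, and Index.Get is defined in search.go below.

package app // hypothetical

import (
	"fmt"

	"golang.org/x/net/context"

	"google.golang.org/appengine/search"
)

// dumpDoc loads a document into a FieldList rather than a typed struct,
// which is useful when the field set is not known at compile time.
func dumpDoc(ctx context.Context, id string) error {
	index, err := search.Open("comments")
	if err != nil {
		return err
	}
	var fields search.FieldList
	if err := index.Get(ctx, id, &fields); err != nil {
		return err
	}
	for _, f := range fields {
		fmt.Printf("%s = %v (lang %q)\n", f.Name, f.Value, f.Language)
	}
	return nil
}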
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
new file mode 100644
index 000000000..35a567d62
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -0,0 +1,1189 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search // import "google.golang.org/appengine/search"
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+const maxDocumentsPerPutDelete = 200
+
+var (
+ // ErrInvalidDocumentType is returned when methods like Put, Get or Next
+ // are passed a dst or src argument of invalid type.
+ ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+ // ErrNoSuchDocument is returned when no document was found for a given ID.
+ ErrNoSuchDocument = errors.New("search: no such document")
+
+ // ErrTooManyDocuments is returned when the user passes too many documents to
+ // PutMulti or DeleteMulti.
+ ErrTooManyDocuments = fmt.Errorf("search: too many documents given to put or delete (max is %d)", maxDocumentsPerPutDelete)
+)
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foo<b>bar" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved.
+func validIndexNameOrDocID(s string) bool {
+ if strings.HasPrefix(s, "!") {
+ return false
+ }
+ for _, c := range s {
+ if c < 0x21 || 0x7f <= c {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)
+ languageRE = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks
+// the validity of both field and facet names.
+func validFieldName(s string) bool {
+ return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the rank is in the range [0, 2^31).
+func validDocRank(r int) bool {
+ return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1.
+func validLanguage(s string) bool {
+ return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+ return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents.
+type Index struct {
+ spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+func Open(name string) (*Index, error) {
+ if !validIndexNameOrDocID(name) {
+ return nil, fmt.Errorf("search: invalid index name %q", name)
+ }
+ return &Index{
+ spec: pb.IndexSpec{
+ Name: &name,
+ },
+ }, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {
+ ids, err := x.PutMulti(c, []string{id}, []interface{}{src})
+ if err != nil {
+ return "", err
+ }
+ return ids[0], nil
+}
+
+// PutMulti is like Put, but is more efficient for adding multiple documents to
+// the index at once.
+//
+// Up to 200 documents can be added at once. ErrTooManyDocuments is returned if
+// you try to add more.
+//
+// ids can either be an empty slice (which means new IDs will be allocated for
+// each of the documents added) or a slice the same size as srcs.
+//
+// The error may be an instance of appengine.MultiError, in which case it will
+// be the same size as srcs and the individual errors inside will correspond
+// with the items in srcs.
+func (x *Index) PutMulti(c context.Context, ids []string, srcs []interface{}) ([]string, error) {
+ if len(ids) != 0 && len(srcs) != len(ids) {
+ return nil, fmt.Errorf("search: PutMulti expects ids and srcs slices of the same length")
+ }
+ if len(srcs) > maxDocumentsPerPutDelete {
+ return nil, ErrTooManyDocuments
+ }
+
+ docs := make([]*pb.Document, len(srcs))
+ for i, s := range srcs {
+ var err error
+ docs[i], err = saveDoc(s)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(ids) != 0 && ids[i] != "" {
+ if !validIndexNameOrDocID(ids[i]) {
+ return nil, fmt.Errorf("search: invalid ID %q", ids[i])
+ }
+ docs[i].Id = proto.String(ids[i])
+ }
+ }
+
+ // spec is modified by Call when applying the current Namespace, so copy it to
+ // avoid retaining the namespace beyond the scope of the Call.
+ spec := x.spec
+ req := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: docs,
+ IndexSpec: &spec,
+ },
+ }
+ res := &pb.IndexDocumentResponse{}
+ if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil {
+ return nil, err
+ }
+ multiErr, hasErr := make(appengine.MultiError, len(res.Status)), false
+ for i, s := range res.Status {
+ if s.GetCode() != pb.SearchServiceError_OK {
+ multiErr[i] = fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ hasErr = true
+ }
+ }
+ if hasErr {
+ return res.DocId, multiErr
+ }
+
+ if len(res.Status) != len(docs) || len(res.DocId) != len(docs) {
+ return nil, fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs, expected %d)",
+ len(res.Status), len(res.DocId), len(docs))
+ }
+ return res.DocId, nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer. It is up to the caller to decide whether this error
+// is fatal, recoverable, or ignorable.
+func (x *Index) Get(c context.Context, id string, dst interface{}) error {
+ if id == "" || !validIndexNameOrDocID(id) {
+ return fmt.Errorf("search: invalid ID %q", id)
+ }
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &x.spec,
+ StartDocId: proto.String(id),
+ Limit: proto.Int32(1),
+ },
+ }
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ if len(res.Document) != 1 || res.Document[0].GetId() != id {
+ return ErrNoSuchDocument
+ }
+ return loadDoc(dst, res.Document[0], nil)
+}
+
+// Delete deletes a document from the index.
+func (x *Index) Delete(c context.Context, id string) error {
+ return x.DeleteMulti(c, []string{id})
+}
+
+// DeleteMulti deletes multiple documents from the index.
+//
+// The returned error may be an instance of appengine.MultiError, in which case
+// it will be the same size as ids and the individual errors inside will
+// correspond with the items in ids.
+func (x *Index) DeleteMulti(c context.Context, ids []string) error {
+ if len(ids) > maxDocumentsPerPutDelete {
+ return ErrTooManyDocuments
+ }
+
+ req := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: ids,
+ IndexSpec: &x.spec,
+ },
+ }
+ res := &pb.DeleteDocumentResponse{}
+ if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil {
+ return err
+ }
+ if len(res.Status) != len(ids) {
+ return fmt.Errorf("search: internal error: wrong number of results (%d, expected %d)",
+ len(res.Status), len(ids))
+ }
+ multiErr, hasErr := make(appengine.MultiError, len(ids)), false
+ for i, s := range res.Status {
+ if s.GetCode() != pb.SearchServiceError_OK {
+ multiErr[i] = fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ hasErr = true
+ }
+ }
+ if hasErr {
+ return multiErr
+ }
+ return nil
+}
+
+// List lists all of the documents in an index. The documents are returned in
+// increasing ID order.
+func (x *Index) List(c context.Context, opts *ListOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ count: -1,
+ listInclusive: true,
+ more: moreList,
+ }
+ if opts != nil {
+ t.listStartID = opts.StartID
+ t.limit = opts.Limit
+ t.idsOnly = opts.IDsOnly
+ }
+ return t
+}
+
+func moreList(t *Iterator) error {
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &t.index.spec,
+ },
+ }
+ if t.listStartID != "" {
+ req.Params.StartDocId = &t.listStartID
+ req.Params.IncludeStartDoc = &t.listInclusive
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.listRes = res.Document
+ t.listStartID, t.listInclusive, t.more = "", false, nil
+ if len(res.Document) != 0 && t.limit <= 0 {
+ if id := res.Document[len(res.Document)-1].GetId(); id != "" {
+ t.listStartID, t.more = id, moreList
+ }
+ }
+ return nil
+}
+
+// ListOptions are the options for listing documents in an index. Passing a nil
+// *ListOptions is equivalent to using the default values.
+type ListOptions struct {
+ // StartID is the inclusive lower bound for the ID of the returned
+ // documents. The zero value means all documents will be returned.
+ StartID string
+
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the list
+ // operation; no document fields are populated.
+ IDsOnly bool
+}
+
+// Search searches the index for the given query.
+func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ searchQuery: query,
+ more: moreSearch,
+ }
+ if opts != nil {
+ if opts.Cursor != "" {
+ if opts.Offset != 0 {
+ return errIter("at most one of Cursor and Offset may be specified")
+ }
+ t.searchCursor = proto.String(string(opts.Cursor))
+ }
+ t.limit = opts.Limit
+ t.fields = opts.Fields
+ t.idsOnly = opts.IDsOnly
+ t.sort = opts.Sort
+ t.exprs = opts.Expressions
+ t.refinements = opts.Refinements
+ t.facetOpts = opts.Facets
+ t.searchOffset = opts.Offset
+ t.countAccuracy = opts.CountAccuracy
+ }
+ return t
+}
+
+func moreSearch(t *Iterator) error {
+ // We use per-result (rather than single/per-page) cursors since this
+ // lets us return a Cursor for every iterator document. The two cursor
+ // types are largely interchangeable: a page cursor is the same as the
+ // last per-result cursor in a given search response.
+ req := &pb.SearchRequest{
+ Params: &pb.SearchParams{
+ IndexSpec: &t.index.spec,
+ Query: &t.searchQuery,
+ Cursor: t.searchCursor,
+ CursorType: pb.SearchParams_PER_RESULT.Enum(),
+ FieldSpec: &pb.FieldSpec{
+ Name: t.fields,
+ },
+ },
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.searchOffset > 0 {
+ req.Params.Offset = proto.Int32(int32(t.searchOffset))
+ t.searchOffset = 0
+ }
+ if t.countAccuracy > 0 {
+ req.Params.MatchedCountAccuracy = proto.Int32(int32(t.countAccuracy))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+ if t.sort != nil {
+ if err := sortToProto(t.sort, req.Params); err != nil {
+ return err
+ }
+ }
+ if t.refinements != nil {
+ if err := refinementsToProto(t.refinements, req.Params); err != nil {
+ return err
+ }
+ }
+ for _, e := range t.exprs {
+ req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{
+ Name: proto.String(e.Name),
+ Expression: proto.String(e.Expr),
+ })
+ }
+ for _, f := range t.facetOpts {
+ if err := f.setParams(req.Params); err != nil {
+ return fmt.Errorf("bad FacetSearchOption: %v", err)
+ }
+ }
+ // Don't repeat facet search.
+ t.facetOpts = nil
+
+ res := &pb.SearchResponse{}
+ if err := internal.Call(t.c, "search", "Search", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.searchRes = res.Result
+ if len(res.FacetResult) > 0 {
+ t.facetRes = res.FacetResult
+ }
+ t.count = int(*res.MatchedCount)
+ if t.limit > 0 {
+ t.more = nil
+ } else {
+ t.more = moreSearch
+ }
+ return nil
+}
+
+// SearchOptions are the options for searching an index. Passing a nil
+// *SearchOptions is equivalent to using the default values.
+type SearchOptions struct {
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the search
+ // operation; no document fields are populated.
+ IDsOnly bool
+
+ // Sort controls the ordering of search results.
+ Sort *SortOptions
+
+ // Fields specifies which document fields to include in the results. If omitted,
+ // all document fields are returned. No more than 100 fields may be specified.
+ Fields []string
+
+ // Expressions specifies additional computed fields to add to each returned
+ // document.
+ Expressions []FieldExpression
+
+ // Facets controls what facet information is returned for these search results.
+ // If no options are specified, no facet results will be returned.
+ Facets []FacetSearchOption
+
+ // Refinements filters the returned documents by requiring them to contain facets
+ // with specific values. Refinements are applied in conjunction for facets with
+ // different names, and in disjunction otherwise.
+ Refinements []Facet
+
+ // Cursor causes the results to commence with the first document after
+ // the document associated with the cursor.
+ Cursor Cursor
+
+ // Offset specifies the number of documents to skip over before returning results.
+	// When specified, Cursor must be empty.
+ Offset int
+
+ // CountAccuracy specifies the maximum result count that can be expected to
+ // be accurate. If zero, the count accuracy defaults to 20.
+ CountAccuracy int
+}
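+
+// The options above are consumed by Index.Search. A minimal, illustrative
+// sketch of a query loop (ctx and idx are assumed to come from the calling
+// application and are not defined here):
+//
+//	opts := &SearchOptions{
+//		Limit:   20,
+//		IDsOnly: true,
+//	}
+//	it := idx.Search(ctx, "gopher", opts)
+//	for {
+//		id, err := it.Next(nil)
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			break // handle the error
+//		}
+//		_ = id // ID of a matching document
+//	}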
+
+// Cursor represents an iterator's position.
+//
+// The string value of a cursor is web-safe. It can be saved and restored
+// for later use.
+type Cursor string
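+
+// As a rough sketch, a cursor obtained from Iterator.Cursor can be persisted
+// as a string and later passed back via SearchOptions.Cursor to resume a
+// query (ctx and idx are assumed to exist in the caller):
+//
+//	it := idx.Search(ctx, "gopher", nil)
+//	if _, err := it.Next(nil); err == nil {
+//		saved := string(it.Cursor()) // web-safe; may be stored elsewhere
+//		// Later, results commence with the first document after the one
+//		// the cursor refers to:
+//		it = idx.Search(ctx, "gopher", &SearchOptions{Cursor: Cursor(saved)})
+//	}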
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+ // Name is the name to use for the computed field.
+ Name string
+
+ // Expr is evaluated to provide a custom content snippet for each document.
+ // See https://cloud.google.com/appengine/docs/standard/go/search/options for
+ // the supported expression syntax.
+ Expr string
+}
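+
+// A brief sketch of requesting a computed field; the field and expression
+// names here are purely illustrative:
+//
+//	opts := &SearchOptions{
+//		Expressions: []FieldExpression{
+//			{Name: "total", Expr: "price * quantity"},
+//		},
+//	}
+//	// Each document returned by Index.Search then carries a derived
+//	// "total" field alongside its stored fields.
+//	_ = opts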
+
+// FacetSearchOption controls what facet information is returned in search results.
+type FacetSearchOption interface {
+ setParams(*pb.SearchParams) error
+}
+
+// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet
+// discovery for the search. Automatic facet discovery looks for the facets
+// which appear most often, in aggregate, across the matched documents.
+//
+// The maximum number of facets returned is controlled by facetLimit, and the
+// maximum number of values per facet by valueLimit. A limit of zero indicates
+// that a default limit should be used.
+func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {
+ return &autoFacetOpt{facetLimit, valueLimit}
+}
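+
+// A small sketch of automatic facet discovery; zero limits request the
+// defaults, and ctx and idx are assumed to exist in the caller:
+//
+//	it := idx.Search(ctx, "gopher", &SearchOptions{
+//		Facets: []FacetSearchOption{AutoFacetDiscovery(0, 0)},
+//	})
+//	if facets, err := it.Facets(); err == nil {
+//		for _, group := range facets {
+//			for _, fr := range group {
+//				_ = fr.Count // occurrences of fr.Facet in the matched documents
+//			}
+//		}
+//	}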
+
+type autoFacetOpt struct {
+ facetLimit, valueLimit int
+}
+
+const defaultAutoFacetLimit = 10 // As per python runtime search.py.
+
+func (o *autoFacetOpt) setParams(params *pb.SearchParams) error {
+ lim := int32(o.facetLimit)
+ if lim == 0 {
+ lim = defaultAutoFacetLimit
+ }
+ params.AutoDiscoverFacetCount = &lim
+ if o.valueLimit > 0 {
+ params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(int32(o.valueLimit)),
+ }
+ }
+ return nil
+}
+
+// FacetDiscovery returns a FacetSearchOption which selects a facet to be
+// returned with the search results. By default, the most frequently
+// occurring values for that facet will be returned. However, you can also
+// specify a list of particular Atoms or specific Ranges to return.
+func FacetDiscovery(name string, value ...interface{}) FacetSearchOption {
+ return &facetOpt{name, value}
+}
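+
+// A sketch of the value forms FacetDiscovery accepts; the facet names are
+// illustrative only:
+//
+//	opts := &SearchOptions{
+//		Facets: []FacetSearchOption{
+//			FacetDiscovery("colour"),                     // most frequent values
+//			FacetDiscovery("size", Atom("M"), Atom("L")), // particular atoms
+//			FacetDiscovery("price", LessThan(7), Range{7, 14}, AtLeast(14)),
+//		},
+//	}
+//	_ = opts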
+
+type facetOpt struct {
+ name string
+ values []interface{}
+}
+
+func (o *facetOpt) setParams(params *pb.SearchParams) error {
+ req := &pb.FacetRequest{Name: &o.name}
+ params.IncludeFacet = append(params.IncludeFacet, req)
+ if len(o.values) == 0 {
+ return nil
+ }
+ vtype := reflect.TypeOf(o.values[0])
+ reqParam := &pb.FacetRequestParam{}
+ for _, v := range o.values {
+ if reflect.TypeOf(v) != vtype {
+ return errors.New("values must all be Atom, or must all be Range")
+ }
+ switch v := v.(type) {
+ case Atom:
+ reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))
+ case Range:
+ rng, err := rangeToProto(v)
+ if err != nil {
+ return fmt.Errorf("invalid range: %v", err)
+ }
+ reqParam.Range = append(reqParam.Range, rng)
+ default:
+ return fmt.Errorf("unsupported value type %T", v)
+ }
+ }
+ req.Params = reqParam
+ return nil
+}
+
+// FacetDocumentDepth returns a FacetSearchOption which controls the number of
+// documents to be evaluated when preparing facet results.
+func FacetDocumentDepth(depth int) FacetSearchOption {
+ return facetDepthOpt(depth)
+}
+
+type facetDepthOpt int
+
+func (o facetDepthOpt) setParams(params *pb.SearchParams) error {
+ params.FacetDepth = proto.Int32(int32(o))
+ return nil
+}
+
+// FacetResult represents the number of times a particular facet and value
+// appeared in the documents matching a search request.
+type FacetResult struct {
+ Facet
+
+ // Count is the number of times this specific facet and value appeared in the
+ // matching documents.
+ Count int
+}
+
+// Range represents a numeric range with inclusive start and exclusive end.
+// Start may be specified as math.Inf(-1) to indicate there is no minimum
+// value, and End may similarly be specified as math.Inf(1); at least one of
+// Start or End must be a finite number.
+type Range struct {
+ Start, End float64
+}
+
+var (
+ negInf = math.Inf(-1)
+ posInf = math.Inf(1)
+)
+
+// AtLeast returns a Range matching any value greater than, or equal to, min.
+func AtLeast(min float64) Range {
+ return Range{Start: min, End: posInf}
+}
+
+// LessThan returns a Range matching any value less than max.
+func LessThan(max float64) Range {
+ return Range{Start: negInf, End: max}
+}
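+
+// Ranges also serve as facet refinements. A short sketch, with illustrative
+// facet names:
+//
+//	opts := &SearchOptions{
+//		Refinements: []Facet{
+//			{Name: "fur", Value: Atom("fluffy")},
+//			{Name: "age", Value: AtLeast(18)},
+//		},
+//	}
+//	_ = opts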
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+ // Expressions is a slice of expressions representing a multi-dimensional
+ // sort.
+ Expressions []SortExpression
+
+ // Scorer, when specified, will cause the documents to be scored according to
+ // search term frequency.
+ Scorer Scorer
+
+ // Limit is the maximum number of objects to score and/or sort. Limit cannot
+ // be more than 10,000. The zero value indicates a default limit.
+ Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+ // Expr is evaluated to provide a sorting value for each document.
+ // See https://cloud.google.com/appengine/docs/standard/go/search/options for
+ // the supported expression syntax.
+ Expr string
+
+ // Reverse causes the documents to be sorted in ascending order.
+ Reverse bool
+
+	// The default value to use when no field is present or the expression
+ // cannot be calculated for a document. For text sorts, Default must
+ // be of type string; for numeric sorts, float64.
+ Default interface{}
+}
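+
+// A sketch of a multi-dimensional sort combined with a scorer; the sort
+// expressions are illustrative only:
+//
+//	opts := &SearchOptions{
+//		Sort: &SortOptions{
+//			Expressions: []SortExpression{
+//				{Expr: "price"},
+//				{Expr: "rating", Reverse: true, Default: 0.0},
+//			},
+//			Scorer: MatchScorer,
+//			Limit:  1000,
+//		},
+//	}
+//	_ = opts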
+
+// A Scorer defines how a document is scored.
+type Scorer interface {
+ toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+ enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+ spec.Scorer = e.enum.Enum()
+}
+
+var (
+ // MatchScorer assigns a score based on term frequency in a document.
+ MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+ // RescoringMatchScorer assigns a score based on the quality of the query
+ // match. It is similar to a MatchScorer but uses a more complex scoring
+ // algorithm based on match term frequency and other factors like field type.
+ // Please be aware that this algorithm is continually refined and can change
+ // over time without notice. This means that the ordering of search results
+ // that use this scorer can also change without notice.
+ RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+ for _, e := range sort.Expressions {
+ spec := &pb.SortSpec{
+ SortExpression: proto.String(e.Expr),
+ }
+ if e.Reverse {
+ spec.SortDescending = proto.Bool(false)
+ }
+ if e.Default != nil {
+ switch d := e.Default.(type) {
+ case float64:
+ spec.DefaultValueNumeric = &d
+ case string:
+ spec.DefaultValueText = &d
+ default:
+ return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+ }
+ }
+ params.SortSpec = append(params.SortSpec, spec)
+ }
+
+ spec := &pb.ScorerSpec{}
+ if sort.Limit > 0 {
+ spec.Limit = proto.Int32(int32(sort.Limit))
+ params.ScorerSpec = spec
+ }
+ if sort.Scorer != nil {
+ sort.Scorer.toProto(spec)
+ params.ScorerSpec = spec
+ }
+
+ return nil
+}
+
+func refinementsToProto(refinements []Facet, params *pb.SearchParams) error {
+ for _, r := range refinements {
+ ref := &pb.FacetRefinement{
+ Name: proto.String(r.Name),
+ }
+ switch v := r.Value.(type) {
+ case Atom:
+ ref.Value = proto.String(string(v))
+ case Range:
+ rng, err := rangeToProto(v)
+ if err != nil {
+ return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err)
+ }
+			// Unfortunately there are two identical messages for identifying Facet ranges.
+ ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}
+ default:
+ return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v)
+ }
+ params.FacetRefinement = append(params.FacetRefinement, ref)
+ }
+ return nil
+}
+
+func rangeToProto(r Range) (*pb.FacetRange, error) {
+ rng := &pb.FacetRange{}
+ if r.Start != negInf {
+ if !validFloat(r.Start) {
+ return nil, errors.New("invalid value for Start")
+ }
+ rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))
+ } else if r.End == posInf {
+ return nil, errors.New("either Start or End must be finite")
+ }
+ if r.End != posInf {
+ if !validFloat(r.End) {
+ return nil, errors.New("invalid value for End")
+ }
+ rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))
+ }
+ return rng, nil
+}
+
+func protoToRange(rng *pb.FacetRefinement_Range) Range {
+ r := Range{Start: negInf, End: posInf}
+	if x, err := strconv.ParseFloat(rng.GetStart(), 64); err == nil {
+		r.Start = x
+	}
+	if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err == nil {
+ r.End = x
+ }
+ return r
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
+type Iterator struct {
+ c context.Context
+ index *Index
+ err error
+
+ listRes []*pb.Document
+ listStartID string
+ listInclusive bool
+
+ searchRes []*pb.SearchResult
+ facetRes []*pb.FacetResult
+ searchQuery string
+ searchCursor *string
+ searchOffset int
+ sort *SortOptions
+
+ fields []string
+ exprs []FieldExpression
+ refinements []Facet
+ facetOpts []FacetSearchOption
+
+ more func(*Iterator) error
+
+ count int
+ countAccuracy int
+ limit int // items left to return; 0 for unlimited.
+ idsOnly bool
+}
+
+// errIter returns an iterator that only returns the given error.
+func errIter(err string) *Iterator {
+ return &Iterator{
+ err: errors.New(err),
+ }
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("search: query has no more results")
+
+// Count returns an approximation of the number of documents matched by the
+// query. It is only valid to call for iterators returned by Search.
+func (t *Iterator) Count() int { return t.count }
+
+// fetchMore retrieves more results, if there are no errors or pending results.
+func (t *Iterator) fetchMore() {
+ if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {
+ t.err = t.more(t)
+ }
+}
+
+// Next returns the ID of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// dst must be a non-nil struct pointer, a value implementing the FieldLoadSaver
+// interface, or a nil interface value. If a non-nil dst is provided, it
+// will be filled with the indexed fields. dst is ignored if this iterator was
+// created with an IDsOnly option.
+func (t *Iterator) Next(dst interface{}) (string, error) {
+ t.fetchMore()
+ if t.err != nil {
+ return "", t.err
+ }
+
+ var doc *pb.Document
+ var exprs []*pb.Field
+ switch {
+ case len(t.listRes) != 0:
+ doc = t.listRes[0]
+ t.listRes = t.listRes[1:]
+ case len(t.searchRes) != 0:
+ doc = t.searchRes[0].Document
+ exprs = t.searchRes[0].Expression
+ t.searchCursor = t.searchRes[0].Cursor
+ t.searchRes = t.searchRes[1:]
+ default:
+ return "", Done
+ }
+ if doc == nil {
+ return "", errors.New("search: internal error: no document returned")
+ }
+ if !t.idsOnly && dst != nil {
+ if err := loadDoc(dst, doc, exprs); err != nil {
+ return "", err
+ }
+ }
+ return doc.GetId(), nil
+}
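+
+// A sketch of loading each result into a struct; Widget is a hypothetical
+// caller-defined document type, and ctx and idx are assumed:
+//
+//	type Widget struct {
+//		Name  string
+//		Price float64
+//	}
+//	it := idx.Search(ctx, "gopher", nil)
+//	for {
+//		var w Widget
+//		id, err := it.Next(&w)
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			break // handle the error
+//		}
+//		_, _ = id, w
+//	}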
+
+// Cursor returns the cursor associated with the current document (that is,
+// the document most recently returned by a call to Next).
+//
+// Passing this cursor in a future call to Search will cause those results
+// to commence with the first document after the current document.
+func (t *Iterator) Cursor() Cursor {
+ if t.searchCursor == nil {
+ return ""
+ }
+ return Cursor(*t.searchCursor)
+}
+
+// Facets returns the facets found within the search results, if any facets
+// were requested in the SearchOptions.
+func (t *Iterator) Facets() ([][]FacetResult, error) {
+ t.fetchMore()
+ if t.err != nil && t.err != Done {
+ return nil, t.err
+ }
+
+ var facets [][]FacetResult
+ for _, f := range t.facetRes {
+ fres := make([]FacetResult, 0, len(f.Value))
+ for _, v := range f.Value {
+ ref := v.Refinement
+ facet := FacetResult{
+ Facet: Facet{Name: ref.GetName()},
+ Count: int(v.GetCount()),
+ }
+ if ref.Value != nil {
+ facet.Value = Atom(*ref.Value)
+ } else {
+ facet.Value = protoToRange(ref.Range)
+ }
+ fres = append(fres, facet)
+ }
+ facets = append(facets, fres)
+ }
+ return facets, nil
+}
+
+// saveDoc converts from a struct pointer or
+// FieldLoadSaver/FieldMetadataLoadSaver to the Document protobuf.
+func saveDoc(src interface{}) (*pb.Document, error) {
+ var err error
+ var fields []Field
+ var meta *DocumentMetadata
+ switch x := src.(type) {
+ case FieldLoadSaver:
+ fields, meta, err = x.Save()
+ default:
+ fields, meta, err = saveStructWithMeta(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ fieldsProto, err := fieldsToProto(fields)
+ if err != nil {
+ return nil, err
+ }
+ d := &pb.Document{
+ Field: fieldsProto,
+ OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+ OrderIdSource: pb.Document_DEFAULTED.Enum(),
+ }
+ if meta != nil {
+ if meta.Rank != 0 {
+ if !validDocRank(meta.Rank) {
+ return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
+ }
+ *d.OrderId = int32(meta.Rank)
+ d.OrderIdSource = pb.Document_SUPPLIED.Enum()
+ }
+ if len(meta.Facets) > 0 {
+ facets, err := facetsToProto(meta.Facets)
+ if err != nil {
+ return nil, err
+ }
+ d.Facet = facets
+ }
+ }
+ return d, nil
+}
+
+func fieldsToProto(src []Field) ([]*pb.Field, error) {
+ // Maps to catch duplicate time or numeric fields.
+ timeFields, numericFields := make(map[string]bool), make(map[string]bool)
+ dst := make([]*pb.Field, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid field name %q", f.Name)
+ }
+ fieldValue := &pb.FieldValue{}
+ switch x := f.Value.(type) {
+ case string:
+ fieldValue.Type = pb.FieldValue_TEXT.Enum()
+ fieldValue.StringValue = proto.String(x)
+ case Atom:
+ fieldValue.Type = pb.FieldValue_ATOM.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case HTML:
+ fieldValue.Type = pb.FieldValue_HTML.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case time.Time:
+ if timeFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
+ }
+ timeFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_DATE.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
+ case float64:
+ if numericFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
+ }
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x)
+ }
+ numericFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_NUMBER.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ case appengine.GeoPoint:
+ if !x.Valid() {
+ return nil, fmt.Errorf(
+ "search: GeoPoint field %q with invalid value %v",
+ f.Name, x)
+ }
+ fieldValue.Type = pb.FieldValue_GEO.Enum()
+ fieldValue.Geo = &pb.FieldValue_Geo{
+ Lat: proto.Float64(x.Lat),
+ Lng: proto.Float64(x.Lng),
+ }
+ default:
+ return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
+ }
+ if f.Language != "" {
+ switch f.Value.(type) {
+ case string, HTML:
+ if !validLanguage(f.Language) {
+ return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language)
+ }
+ fieldValue.Language = proto.String(f.Language)
+ default:
+ return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value)
+ }
+ }
+ if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
+ return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
+ }
+ dst = append(dst, &pb.Field{
+ Name: proto.String(f.Name),
+ Value: fieldValue,
+ })
+ }
+ return dst, nil
+}
+
+func facetsToProto(src []Facet) ([]*pb.Facet, error) {
+ dst := make([]*pb.Facet, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid facet name %q", f.Name)
+ }
+ facetValue := &pb.FacetValue{}
+ switch x := f.Value.(type) {
+ case Atom:
+ if !utf8.ValidString(string(x)) {
+ return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x)
+ }
+ facetValue.Type = pb.FacetValue_ATOM.Enum()
+ facetValue.StringValue = proto.String(string(x))
+ case float64:
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x)
+ }
+ facetValue.Type = pb.FacetValue_NUMBER.Enum()
+ facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ default:
+ return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value))
+ }
+ dst = append(dst, &pb.Facet{
+ Name: proto.String(f.Name),
+ Value: facetValue,
+ })
+ }
+ return dst, nil
+}
+
+// loadDoc converts from protobufs to a struct pointer or
+// FieldLoadSaver/FieldMetadataLoadSaver. The src param provides the document's
+// stored fields and facets, and any document metadata. An additional slice of
+// fields, exprs, may optionally be provided to contain any derived expressions
+// requested by the developer.
+func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) {
+ fields, err := protoToFields(src.Field)
+ if err != nil {
+ return err
+ }
+ facets, err := protoToFacets(src.Facet)
+ if err != nil {
+ return err
+ }
+ if len(exprs) > 0 {
+ exprFields, err := protoToFields(exprs)
+ if err != nil {
+ return err
+ }
+ // Mark each field as derived.
+ for i := range exprFields {
+ exprFields[i].Derived = true
+ }
+ fields = append(fields, exprFields...)
+ }
+ meta := &DocumentMetadata{
+ Rank: int(src.GetOrderId()),
+ Facets: facets,
+ }
+ switch x := dst.(type) {
+ case FieldLoadSaver:
+ return x.Load(fields, meta)
+ default:
+ return loadStructWithMeta(dst, fields, meta)
+ }
+}
+
+func protoToFields(fields []*pb.Field) ([]Field, error) {
+ dst := make([]Field, 0, len(fields))
+ for _, field := range fields {
+ fieldValue := field.GetValue()
+ f := Field{
+ Name: field.GetName(),
+ }
+ switch fieldValue.GetType() {
+ case pb.FieldValue_TEXT:
+ f.Value = fieldValue.GetStringValue()
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_ATOM:
+ f.Value = Atom(fieldValue.GetStringValue())
+ case pb.FieldValue_HTML:
+ f.Value = HTML(fieldValue.GetStringValue())
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_DATE:
+ sv := fieldValue.GetStringValue()
+ millis, err := strconv.ParseInt(sv, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err)
+ }
+ f.Value = time.Unix(0, millis*1e6)
+ case pb.FieldValue_NUMBER:
+ sv := fieldValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ case pb.FieldValue_GEO:
+ geoValue := fieldValue.GetGeo()
+ geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()}
+ if !geoPoint.Valid() {
+ return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint)
+ }
+ f.Value = geoPoint
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func protoToFacets(facets []*pb.Facet) ([]Facet, error) {
+ if len(facets) == 0 {
+ return nil, nil
+ }
+ dst := make([]Facet, 0, len(facets))
+ for _, facet := range facets {
+ facetValue := facet.GetValue()
+ f := Facet{
+ Name: facet.GetName(),
+ }
+ switch facetValue.GetType() {
+ case pb.FacetValue_ATOM:
+ f.Value = Atom(facetValue.GetStringValue())
+ case pb.FacetValue_NUMBER:
+ sv := facetValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ set := func(s **string) {
+ if *s == nil {
+ *s = &namespace
+ }
+ }
+ switch m := m.(type) {
+ case *pb.IndexDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.ListDocumentsRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.DeleteDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.SearchRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name)
+ internal.NamespaceMods["search"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/search/search_test.go b/vendor/google.golang.org/appengine/search/search_test.go
new file mode 100644
index 000000000..ef1409c19
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search_test.go
@@ -0,0 +1,1270 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+type TestDoc struct {
+ String string
+ Atom Atom
+ HTML HTML
+ Float float64
+ Location appengine.GeoPoint
+ Time time.Time
+}
+
+type FieldListWithMeta struct {
+ Fields FieldList
+ Meta *DocumentMetadata
+}
+
+func (f *FieldListWithMeta) Load(fields []Field, meta *DocumentMetadata) error {
+ f.Meta = meta
+ return f.Fields.Load(fields, nil)
+}
+
+func (f *FieldListWithMeta) Save() ([]Field, *DocumentMetadata, error) {
+ fields, _, err := f.Fields.Save()
+ return fields, f.Meta, err
+}
+
+// Assert that FieldListWithMeta satisfies FieldLoadSaver
+var _ FieldLoadSaver = &FieldListWithMeta{}
+
+var (
+ float = 3.14159
+ floatOut = "3.14159e+00"
+ latitude = 37.3894
+ longitude = 122.0819
+ testGeo = appengine.GeoPoint{latitude, longitude}
+ testString = "foo<b>bar"
+ testTime = time.Unix(1337324400, 0)
+ testTimeOut = "1337324400000"
+ searchMeta = &DocumentMetadata{
+ Rank: 42,
+ }
+ searchDoc = TestDoc{
+ String: testString,
+ Atom: Atom(testString),
+ HTML: HTML(testString),
+ Float: float,
+ Location: testGeo,
+ Time: testTime,
+ }
+ searchFields = FieldList{
+ Field{Name: "String", Value: testString},
+ Field{Name: "Atom", Value: Atom(testString)},
+ Field{Name: "HTML", Value: HTML(testString)},
+ Field{Name: "Float", Value: float},
+ Field{Name: "Location", Value: testGeo},
+ Field{Name: "Time", Value: testTime},
+ }
+ // searchFieldsWithLang is a copy of the searchFields with the Language field
+ // set on text/HTML Fields.
+ searchFieldsWithLang = FieldList{}
+ protoFields = []*pb.Field{
+ newStringValueField("String", testString, pb.FieldValue_TEXT),
+ newStringValueField("Atom", testString, pb.FieldValue_ATOM),
+ newStringValueField("HTML", testString, pb.FieldValue_HTML),
+ newStringValueField("Float", floatOut, pb.FieldValue_NUMBER),
+ {
+ Name: proto.String("Location"),
+ Value: &pb.FieldValue{
+ Geo: &pb.FieldValue_Geo{
+ Lat: proto.Float64(latitude),
+ Lng: proto.Float64(longitude),
+ },
+ Type: pb.FieldValue_GEO.Enum(),
+ },
+ },
+ newStringValueField("Time", testTimeOut, pb.FieldValue_DATE),
+ }
+)
+
+func init() {
+ for _, f := range searchFields {
+ if f.Name == "String" || f.Name == "HTML" {
+ f.Language = "en"
+ }
+ searchFieldsWithLang = append(searchFieldsWithLang, f)
+ }
+}
+
+func newStringValueField(name, value string, valueType pb.FieldValue_ContentType) *pb.Field {
+ return &pb.Field{
+ Name: proto.String(name),
+ Value: &pb.FieldValue{
+ StringValue: proto.String(value),
+ Type: valueType.Enum(),
+ },
+ }
+}
+
+func newFacet(name, value string, valueType pb.FacetValue_ContentType) *pb.Facet {
+ return &pb.Facet{
+ Name: proto.String(name),
+ Value: &pb.FacetValue{
+ StringValue: proto.String(value),
+ Type: valueType.Enum(),
+ },
+ }
+}
+
+func TestValidIndexNameOrDocID(t *testing.T) {
+ testCases := []struct {
+ s string
+ want bool
+ }{
+ {"", true},
+ {"!", false},
+ {"$", true},
+ {"!bad", false},
+ {"good!", true},
+ {"alsoGood", true},
+ {"has spaces", false},
+ {"is_inva\xffid_UTF-8", false},
+ {"is_non-ASCïI", false},
+ {"underscores_are_ok", true},
+ }
+ for _, tc := range testCases {
+ if got := validIndexNameOrDocID(tc.s); got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.s, got, tc.want)
+ }
+ }
+}
+
+func TestLoadDoc(t *testing.T) {
+ got, want := TestDoc{}, searchDoc
+ if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if got != want {
+ t.Errorf("loadDoc: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSaveDoc(t *testing.T) {
+ got, err := saveDoc(&searchDoc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got.Field, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveDocUsesDefaultedRankIfNotSpecified(t *testing.T) {
+ got, err := saveDoc(&searchDoc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ orderIdSource := got.GetOrderIdSource()
+ if orderIdSource != pb.Document_DEFAULTED {
+ t.Errorf("OrderIdSource: got %v, wanted DEFAULTED", orderIdSource)
+ }
+}
+
+func TestLoadFieldList(t *testing.T) {
+ var got FieldList
+ want := searchFieldsWithLang
+ if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLangFields(t *testing.T) {
+ fl := &FieldList{
+ {Name: "Foo", Value: "I am English", Language: "en"},
+ {Name: "Bar", Value: "私は日本人だ", Language: "jp"},
+ }
+ var got FieldList
+ doc, err := saveDoc(fl)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ if err := loadDoc(&got, doc, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if want := fl; !reflect.DeepEqual(&got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveFieldList(t *testing.T) {
+ got, err := saveDoc(&searchFields)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got.Field, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadFieldAndExprList(t *testing.T) {
+ var got, want FieldList
+ for i, f := range searchFieldsWithLang {
+ f.Derived = (i >= 2) // First 2 elements are "fields", next are "expressions".
+ want = append(want, f)
+ }
+ doc, expr := &pb.Document{Field: protoFields[:2]}, protoFields[2:]
+ if err := loadDoc(&got, doc, expr); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadMeta(t *testing.T) {
+ var got FieldListWithMeta
+ want := FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFieldsWithLang,
+ }
+ doc := &pb.Document{
+ Field: protoFields,
+ OrderId: proto.Int32(42),
+ OrderIdSource: pb.Document_SUPPLIED.Enum(),
+ }
+ if err := loadDoc(&got, doc, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveMeta(t *testing.T) {
+ got, err := saveDoc(&FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: proto.Int32(42),
+ OrderIdSource: pb.Document_SUPPLIED.Enum(),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveMetaWithDefaultedRank(t *testing.T) {
+ metaWithoutRank := &DocumentMetadata{
+ Rank: 0,
+ }
+ got, err := saveDoc(&FieldListWithMeta{
+ Meta: metaWithoutRank,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: got.OrderId,
+ OrderIdSource: pb.Document_DEFAULTED.Enum(),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveWithoutMetaUsesDefaultedRank(t *testing.T) {
+ got, err := saveDoc(&FieldListWithMeta{
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: got.OrderId,
+ OrderIdSource: pb.Document_DEFAULTED.Enum(),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadSaveWithStruct(t *testing.T) {
+ type gopher struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }
+
+ doc := gopher{"Gopher", "Likes slide rules.", 4, Atom("furry")}
+ pb := &pb.Document{
+ Field: []*pb.Field{
+ newStringValueField("Name", "Gopher", pb.FieldValue_TEXT),
+ newStringValueField("about", "Likes slide rules.", pb.FieldValue_TEXT),
+ },
+ Facet: []*pb.Facet{
+ newFacet("Legs", "4e+00", pb.FacetValue_NUMBER),
+ newFacet("Fur", "furry", pb.FacetValue_ATOM),
+ },
+ }
+
+ var gotDoc gopher
+ if err := loadDoc(&gotDoc, pb, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(gotDoc, doc) {
+ t.Errorf("loading doc\ngot %v\nwant %v", gotDoc, doc)
+ }
+
+ gotPB, err := saveDoc(&doc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ gotPB.OrderId = nil // Don't test: it's time dependent.
+ gotPB.OrderIdSource = nil // Don't test because it's contingent on OrderId.
+ if !proto.Equal(gotPB, pb) {
+ t.Errorf("saving doc\ngot %v\nwant %v", gotPB, pb)
+ }
+}
+
+func TestValidFieldNames(t *testing.T) {
+ testCases := []struct {
+ name string
+ valid bool
+ }{
+ {"Normal", true},
+ {"Also_OK_123", true},
+ {"Not so great", false},
+ {"lower_case", true},
+ {"Exclaim!", false},
+ {"Hello세상아 안녕", false},
+ {"", false},
+ {"Hεllo", false},
+ {strings.Repeat("A", 500), true},
+ {strings.Repeat("A", 501), false},
+ }
+
+ for _, tc := range testCases {
+ _, err := saveDoc(&FieldList{
+ Field{Name: tc.name, Value: "val"},
+ })
+ if err != nil && !strings.Contains(err.Error(), "invalid field name") {
+ t.Errorf("unexpected err %q for field name %q", err, tc.name)
+ }
+ if (err == nil) != tc.valid {
+ t.Errorf("field %q: expected valid %t, received err %v", tc.name, tc.valid, err)
+ }
+ }
+}
+
+func TestValidLangs(t *testing.T) {
+ testCases := []struct {
+ field Field
+ valid bool
+ }{
+ {Field{Name: "Foo", Value: "String", Language: ""}, true},
+ {Field{Name: "Foo", Value: "String", Language: "en"}, true},
+ {Field{Name: "Foo", Value: "String", Language: "aussie"}, false},
+ {Field{Name: "Foo", Value: "String", Language: "12"}, false},
+ {Field{Name: "Foo", Value: HTML("String"), Language: "en"}, true},
+ {Field{Name: "Foo", Value: Atom("String"), Language: "en"}, false},
+ {Field{Name: "Foo", Value: 42, Language: "en"}, false},
+ }
+
+ for _, tt := range testCases {
+ _, err := saveDoc(&FieldList{tt.field})
+ if err == nil != tt.valid {
+ t.Errorf("Field %v, got error %v, wanted valid %t", tt.field, err, tt.valid)
+ }
+ }
+}
+
+func TestDuplicateFields(t *testing.T) {
+ testCases := []struct {
+ desc string
+ fields FieldList
+ errMsg string // Non-empty if we expect an error
+ }{
+ {
+ desc: "multi string",
+ fields: FieldList{{Name: "FieldA", Value: "val1"}, {Name: "FieldA", Value: "val2"}, {Name: "FieldA", Value: "val3"}},
+ },
+ {
+ desc: "multi atom",
+ fields: FieldList{{Name: "FieldA", Value: Atom("val1")}, {Name: "FieldA", Value: Atom("val2")}, {Name: "FieldA", Value: Atom("val3")}},
+ },
+ {
+ desc: "mixed",
+ fields: FieldList{{Name: "FieldA", Value: testString}, {Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: float}},
+ },
+ {
+ desc: "multi time",
+ fields: FieldList{{Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: testTime}},
+ errMsg: `duplicate time field "FieldA"`,
+ },
+ {
+ desc: "multi num",
+ fields: FieldList{{Name: "FieldA", Value: float}, {Name: "FieldA", Value: float}},
+ errMsg: `duplicate numeric field "FieldA"`,
+ },
+ }
+ for _, tc := range testCases {
+ _, err := saveDoc(&tc.fields)
+ if (err == nil) != (tc.errMsg == "") || (err != nil && !strings.Contains(err.Error(), tc.errMsg)) {
+ t.Errorf("%s: got err %v, wanted %q", tc.desc, err, tc.errMsg)
+ }
+ }
+}
+
+func TestLoadErrFieldMismatch(t *testing.T) {
+ testCases := []struct {
+ desc string
+ dst interface{}
+ src []*pb.Field
+ err error
+ }{
+ {
+ desc: "missing",
+ dst: &struct{ One string }{},
+ src: []*pb.Field{newStringValueField("Two", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "Two",
+ Reason: "no such struct field",
+ },
+ },
+ {
+ desc: "wrong type",
+ dst: &struct{ Num float64 }{},
+ src: []*pb.Field{newStringValueField("Num", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "Num",
+ Reason: "type mismatch: float64 for string data",
+ },
+ },
+ {
+ desc: "unsettable",
+ dst: &struct{ lower string }{},
+ src: []*pb.Field{newStringValueField("lower", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "lower",
+ Reason: "cannot set struct field",
+ },
+ },
+ }
+ for _, tc := range testCases {
+ err := loadDoc(tc.dst, &pb.Document{Field: tc.src}, nil)
+ if !reflect.DeepEqual(err, tc.err) {
+ t.Errorf("%s, got err %v, wanted %v", tc.desc, err, tc.err)
+ }
+ }
+}
+
+func TestLimit(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, res *pb.SearchResponse) error {
+ limit := 20 // Default per page.
+ if req.Params.Limit != nil {
+ limit = int(*req.Params.Limit)
+ }
+ res.Status = &pb.RequestStatus{Code: pb.SearchServiceError_OK.Enum()}
+ res.MatchedCount = proto.Int64(int64(limit))
+ for i := 0; i < limit; i++ {
+ res.Result = append(res.Result, &pb.SearchResult{Document: &pb.Document{}})
+ res.Cursor = proto.String("moreresults")
+ }
+ return nil
+ })
+
+ const maxDocs = 500 // Limit maximum number of docs.
+ testCases := []struct {
+ limit, want int
+ }{
+ {limit: 0, want: maxDocs},
+ {limit: 42, want: 42},
+ {limit: 100, want: 100},
+ {limit: 1000, want: maxDocs},
+ }
+
+ for _, tt := range testCases {
+ it := index.Search(c, "gopher", &SearchOptions{Limit: tt.limit, IDsOnly: true})
+ count := 0
+ for ; count < maxDocs; count++ {
+ _, err := it.Next(nil)
+ if err == Done {
+ break
+ }
+ if err != nil {
+ t.Fatalf("err after %d: %v", count, err)
+ }
+ }
+ if count != tt.want {
+ t.Errorf("got %d results, expected %d", count, tt.want)
+ }
+ }
+}
+
+func TestPut(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ expectedIn := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: []*pb.Document{
+ {Field: protoFields, OrderId: proto.Int32(42), OrderIdSource: pb.Document_SUPPLIED.Enum()},
+ },
+ IndexSpec: &pb.IndexSpec{
+ Name: proto.String("Doc"),
+ },
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ id, err := index.Put(c, "", &FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want := "doc_id"; id != want {
+ t.Errorf("Got doc ID %q, want %q", id, want)
+ }
+}
+
+func TestPutAutoOrderID(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ if len(in.Params.GetDocument()) < 1 {
+ return fmt.Errorf("expected at least one Document, got %v", in)
+ }
+ got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds())
+ if d := got - want; -5 > d || d > 5 {
+ return fmt.Errorf("got OrderId %d, want near %d", got, want)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ if _, err := index.Put(c, "", &searchFields); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPutBadStatus(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(_ *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {
+ Code: pb.SearchServiceError_INVALID_REQUEST.Enum(),
+ ErrorDetail: proto.String("insufficient gophers"),
+ },
+ },
+ }
+ return nil
+ })
+
+ wantErr := "search: INVALID_REQUEST: insufficient gophers"
+ if _, err := index.Put(c, "", &searchFields); err == nil || err.Error() != wantErr {
+ t.Fatalf("Put: got %v error, want %q", err, wantErr)
+ }
+}
+
+func TestPutMultiNilIDSlice(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ if len(in.Params.GetDocument()) < 1 {
+ return fmt.Errorf("got %v, want at least 1 document", in)
+ }
+ got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds())
+ if d := got - want; -5 > d || d > 5 {
+ return fmt.Errorf("got OrderId %d, want near %d", got, want)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ if _, err := index.PutMulti(c, nil, []interface{}{&searchFields}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPutMultiError(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ {Code: pb.SearchServiceError_PERMISSION_DENIED.Enum(), ErrorDetail: proto.String("foo")},
+ },
+ DocId: []string{
+ "id1",
+ "",
+ },
+ }
+ return nil
+ })
+
+ switch _, err := index.PutMulti(c, nil, []interface{}{&searchFields, &searchFields}); {
+ case err == nil:
+ t.Fatalf("got nil, want error")
+ case err.(appengine.MultiError)[0] != nil:
+ t.Fatalf("got %v, want nil MultiError[0]", err.(appengine.MultiError)[0])
+ case err.(appengine.MultiError)[1] == nil:
+		t.Fatalf("got nil, want non-nil MultiError[1]")
+ }
+}
+
+func TestPutMultiWrongNumberOfIDs(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ return nil
+ })
+
+ if _, err := index.PutMulti(c, []string{"a"}, []interface{}{&searchFields, &searchFields}); err == nil {
+ t.Fatal("got success, want error")
+ }
+}
+
+func TestPutMultiTooManyDocs(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ return nil
+ })
+
+ srcs := make([]interface{}, 201)
+	for i := range srcs {
+ srcs[i] = &searchFields
+ }
+
+ if _, err := index.PutMulti(c, nil, srcs); err != ErrTooManyDocuments {
+ t.Fatalf("got %v, want ErrTooManyDocuments", err)
+ }
+}
+
+func TestSortOptions(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ sort *SortOptions
+ wantSort []*pb.SortSpec
+ wantScorer *pb.ScorerSpec
+ wantErr string
+ }{
+ {
+ desc: "No SortOptions",
+ },
+ {
+ desc: "Basic",
+ sort: &SortOptions{
+ Expressions: []SortExpression{
+ {Expr: "dog"},
+ {Expr: "cat", Reverse: true},
+ {Expr: "gopher", Default: "blue"},
+ {Expr: "fish", Default: 2.0},
+ },
+ Limit: 42,
+ Scorer: MatchScorer,
+ },
+ wantSort: []*pb.SortSpec{
+ {SortExpression: proto.String("dog")},
+ {SortExpression: proto.String("cat"), SortDescending: proto.Bool(false)},
+ {SortExpression: proto.String("gopher"), DefaultValueText: proto.String("blue")},
+ {SortExpression: proto.String("fish"), DefaultValueNumeric: proto.Float64(2)},
+ },
+ wantScorer: &pb.ScorerSpec{
+ Limit: proto.Int32(42),
+ Scorer: pb.ScorerSpec_MATCH_SCORER.Enum(),
+ },
+ },
+ {
+ desc: "Bad expression default",
+ sort: &SortOptions{
+ Expressions: []SortExpression{
+ {Expr: "dog", Default: true},
+ },
+ },
+ wantErr: `search: invalid Default type bool for expression "dog"`,
+ },
+ {
+ desc: "RescoringMatchScorer",
+ sort: &SortOptions{Scorer: RescoringMatchScorer},
+ wantScorer: &pb.ScorerSpec{Scorer: pb.ScorerSpec_RESCORING_MATCH_SCORER.Enum()},
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ params := req.Params
+ if !reflect.DeepEqual(params.SortSpec, tt.wantSort) {
+ t.Errorf("%s: params.SortSpec=%v; want %v", tt.desc, params.SortSpec, tt.wantSort)
+ }
+ if !reflect.DeepEqual(params.ScorerSpec, tt.wantScorer) {
+ t.Errorf("%s: params.ScorerSpec=%v; want %v", tt.desc, params.ScorerSpec, tt.wantScorer)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{Sort: tt.sort})
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestFieldSpec(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+	errFoo := errors.New("foo") // Sentinel err to return to prevent response parsing.
+
+ testCases := []struct {
+ desc string
+ opts *SearchOptions
+ want *pb.FieldSpec
+ }{
+ {
+ desc: "No options",
+ want: &pb.FieldSpec{},
+ },
+ {
+ desc: "Fields",
+ opts: &SearchOptions{
+ Fields: []string{"one", "two"},
+ },
+ want: &pb.FieldSpec{
+ Name: []string{"one", "two"},
+ },
+ },
+ {
+ desc: "Expressions",
+ opts: &SearchOptions{
+ Expressions: []FieldExpression{
+ {Name: "one", Expr: "price * quantity"},
+ {Name: "two", Expr: "min(daily_use, 10) * rate"},
+ },
+ },
+ want: &pb.FieldSpec{
+ Expression: []*pb.FieldSpec_Expression{
+ {Name: proto.String("one"), Expression: proto.String("price * quantity")},
+ {Name: proto.String("two"), Expression: proto.String("min(daily_use, 10) * rate")},
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ params := req.Params
+ if !reflect.DeepEqual(params.FieldSpec, tt.want) {
+ t.Errorf("%s: params.FieldSpec=%v; want %v", tt.desc, params.FieldSpec, tt.want)
+ }
+ return errFoo // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", tt.opts)
+ if _, err := it.Next(nil); err != errFoo {
+ t.Fatalf("%s: got error %v; want %v", tt.desc, err, errFoo)
+ }
+ }
+}
+
+func TestBasicSearchOpts(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ facetOpts []FacetSearchOption
+ cursor Cursor
+ offset int
+ countAccuracy int
+ want *pb.SearchParams
+ wantErr string
+ }{
+ {
+ desc: "No options",
+ want: &pb.SearchParams{},
+ },
+ {
+ desc: "Default auto discovery",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(0, 0),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(10),
+ },
+ },
+ {
+ desc: "Auto discovery",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(7, 12),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(7),
+ FacetAutoDetectParam: &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(12),
+ },
+ },
+ },
+ {
+ desc: "Param Depth",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(7, 12),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(7),
+ FacetAutoDetectParam: &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(12),
+ },
+ },
+ },
+ {
+ desc: "Doc depth",
+ facetOpts: []FacetSearchOption{
+ FacetDocumentDepth(123),
+ },
+ want: &pb.SearchParams{
+ FacetDepth: proto.Int32(123),
+ },
+ },
+ {
+ desc: "Facet discovery",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour"),
+ FacetDiscovery("size", Atom("M"), Atom("L")),
+ FacetDiscovery("price", LessThan(7), Range{7, 14}, AtLeast(14)),
+ },
+ want: &pb.SearchParams{
+ IncludeFacet: []*pb.FacetRequest{
+ {Name: proto.String("colour")},
+ {Name: proto.String("size"), Params: &pb.FacetRequestParam{
+ ValueConstraint: []string{"M", "L"},
+ }},
+ {Name: proto.String("price"), Params: &pb.FacetRequestParam{
+ Range: []*pb.FacetRange{
+ {End: proto.String("7e+00")},
+ {Start: proto.String("7e+00"), End: proto.String("1.4e+01")},
+ {Start: proto.String("1.4e+01")},
+ },
+ }},
+ },
+ },
+ },
+ {
+ desc: "Facet discovery - bad value",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", true),
+ },
+ wantErr: "bad FacetSearchOption: unsupported value type bool",
+ },
+ {
+ desc: "Facet discovery - mix value types",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", Atom("blue"), AtLeast(7)),
+ },
+ wantErr: "bad FacetSearchOption: values must all be Atom, or must all be Range",
+ },
+ {
+ desc: "Facet discovery - invalid range",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", Range{negInf, posInf}),
+ },
+ wantErr: "bad FacetSearchOption: invalid range: either Start or End must be finite",
+ },
+ {
+ desc: "Cursor",
+ cursor: Cursor("mycursor"),
+ want: &pb.SearchParams{
+ Cursor: proto.String("mycursor"),
+ },
+ },
+ {
+ desc: "Offset",
+ offset: 121,
+ want: &pb.SearchParams{
+ Offset: proto.Int32(121),
+ },
+ },
+ {
+ desc: "Cursor and Offset set",
+ cursor: Cursor("mycursor"),
+ offset: 121,
+ wantErr: "at most one of Cursor and Offset may be specified",
+ },
+ {
+ desc: "Count accuracy",
+ countAccuracy: 100,
+ want: &pb.SearchParams{
+ MatchedCountAccuracy: proto.Int32(100),
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ if tt.want == nil {
+ t.Errorf("%s: expected call to fail", tt.desc)
+ return nil
+ }
+ // Set default fields.
+ tt.want.Query = proto.String("gopher")
+ tt.want.IndexSpec = &pb.IndexSpec{Name: proto.String("Doc")}
+ tt.want.CursorType = pb.SearchParams_PER_RESULT.Enum()
+ tt.want.FieldSpec = &pb.FieldSpec{}
+ if got := req.Params; !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%s: params=%v; want %v", tt.desc, got, tt.want)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{
+ Facets: tt.facetOpts,
+ Cursor: tt.cursor,
+ Offset: tt.offset,
+ CountAccuracy: tt.countAccuracy,
+ })
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestFacetRefinements(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ refine []Facet
+ want []*pb.FacetRefinement
+ wantErr string
+ }{
+ {
+ desc: "No refinements",
+ },
+ {
+ desc: "Basic",
+ refine: []Facet{
+ {Name: "fur", Value: Atom("fluffy")},
+ {Name: "age", Value: LessThan(123)},
+ {Name: "age", Value: AtLeast(0)},
+ {Name: "legs", Value: Range{Start: 3, End: 5}},
+ },
+ want: []*pb.FacetRefinement{
+ {Name: proto.String("fur"), Value: proto.String("fluffy")},
+ {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{End: proto.String("1.23e+02")}},
+ {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{Start: proto.String("0e+00")}},
+ {Name: proto.String("legs"), Range: &pb.FacetRefinement_Range{Start: proto.String("3e+00"), End: proto.String("5e+00")}},
+ },
+ },
+ {
+ desc: "Infinite range",
+ refine: []Facet{
+ {Name: "age", Value: Range{Start: negInf, End: posInf}},
+ },
+ wantErr: `search: refinement for facet "age": either Start or End must be finite`,
+ },
+ {
+ desc: "Bad End value in range",
+ refine: []Facet{
+ {Name: "age", Value: LessThan(2147483648)},
+ },
+ wantErr: `search: refinement for facet "age": invalid value for End`,
+ },
+ {
+ desc: "Bad Start value in range",
+ refine: []Facet{
+ {Name: "age", Value: AtLeast(-2147483649)},
+ },
+ wantErr: `search: refinement for facet "age": invalid value for Start`,
+ },
+ {
+ desc: "Unknown value type",
+ refine: []Facet{
+ {Name: "age", Value: "you can't use strings!"},
+ },
+ wantErr: `search: unsupported refinement for facet "age" of type string`,
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ if got := req.Params.FacetRefinement; !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%s: params.FacetRefinement=%v; want %v", tt.desc, got, tt.want)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{Refinements: tt.refine})
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(req *pb.IndexDocumentRequest, res *pb.IndexDocumentResponse) error {
+ namec <- req.Params.IndexSpec.Namespace
+ return fmt.Errorf("RPC error")
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+	i := &Index{}
+
+ i.Put(c0, "something", &searchDoc)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Put with c0: ns = %q, want nil`, *ns)
+ }
+
+ i.Put(c1, "something", &searchDoc)
+ if ns := <-namec; ns == nil {
+ t.Error(`Put with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`Put with c1: ns = %q, want "A"`, *ns)
+ }
+
+ i.Put(c2, "something", &searchDoc)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Put with c2: ns = %q, want nil`, *ns)
+ }
+}
+
+func TestDelete(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ }
+ return nil
+ })
+
+ if err := index.Delete(c, "id"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDeleteMulti(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id1", "id2"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ }
+ return nil
+ })
+
+ if err := index.DeleteMulti(c, []string{"id1", "id2"}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDeleteWrongNumberOfResults(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id1", "id2"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ }
+ return nil
+ })
+
+ if err := index.DeleteMulti(c, []string{"id1", "id2"}); err == nil {
+ t.Fatalf("got nil, want error")
+ }
+}
+
+func TestDeleteMultiError(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id1", "id2"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ {Code: pb.SearchServiceError_PERMISSION_DENIED.Enum(), ErrorDetail: proto.String("foo")},
+ },
+ }
+ return nil
+ })
+
+ switch err := index.DeleteMulti(c, []string{"id1", "id2"}); {
+ case err == nil:
+ t.Fatalf("got nil, want error")
+ case err.(appengine.MultiError)[0] != nil:
+ t.Fatalf("got %v, want nil MultiError[0]", err.(appengine.MultiError)[0])
+ case err.(appengine.MultiError)[1] == nil:
+		t.Fatalf("got nil, want non-nil MultiError[1]")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/search/struct.go b/vendor/google.golang.org/appengine/search/struct.go
new file mode 100644
index 000000000..e73d2f2ef
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+type ErrFieldMismatch struct {
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
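+
+// Since loading continues past a mismatched field, callers that load into
+// structs may choose to tolerate this error. A hedged sketch (it is an
+// *Iterator from a prior Search call; Widget is a hypothetical type):
+//
+//	var w Widget
+//	if _, err := it.Next(&w); err != nil {
+//		if _, ok := err.(*ErrFieldMismatch); ok {
+//			// w is partially populated; decide whether that is acceptable.
+//		}
+//	}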
+
+// ErrFacetMismatch is returned when a facet is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. StructType is the type of the struct
+// pointed to by the destination argument passed to Iterator.Next.
+type ErrFacetMismatch struct {
+ StructType reflect.Type
+ FacetName string
+ Reason string
+}
+
+func (e *ErrFacetMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason)
+}
+
+// structCodec defines how to convert a given struct to/from a search document.
+type structCodec struct {
+ // byIndex returns the struct tag for the i'th struct field.
+ byIndex []structTag
+
+ // fieldByName returns the index of the struct field for the given field name.
+ fieldByName map[string]int
+
+	// facetByName returns the index of the struct field for the given facet name.
+ facetByName map[string]int
+}
+
+// structTag holds a structured version of each struct field's parsed tag.
+type structTag struct {
+ name string
+ facet bool
+ ignore bool
+}
+
+var (
+ codecsMu sync.RWMutex
+ codecs = map[reflect.Type]*structCodec{}
+)
+
+func loadCodec(t reflect.Type) (*structCodec, error) {
+ codecsMu.RLock()
+ codec, ok := codecs[t]
+ codecsMu.RUnlock()
+ if ok {
+ return codec, nil
+ }
+
+ codecsMu.Lock()
+ defer codecsMu.Unlock()
+ if codec, ok := codecs[t]; ok {
+ return codec, nil
+ }
+
+ codec = &structCodec{
+ fieldByName: make(map[string]int),
+ facetByName: make(map[string]int),
+ }
+
+ for i, I := 0, t.NumField(); i < I; i++ {
+ f := t.Field(i)
+ name, opts := f.Tag.Get("search"), ""
+ if i := strings.Index(name, ","); i != -1 {
+ name, opts = name[:i], name[i+1:]
+ }
+ ignore := false
+ if name == "-" {
+ ignore = true
+ } else if name == "" {
+ name = f.Name
+ } else if !validFieldName(name) {
+ return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name)
+ }
+ facet := opts == "facet"
+ codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet, ignore: ignore})
+ if facet {
+ codec.facetByName[name] = i
+ } else {
+ codec.fieldByName[name] = i
+ }
+ }
+
+ codecs[t] = codec
+ return codec, nil
+}
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {
+ var err error
+ for _, field := range fields {
+ i, ok := s.codec.fieldByName[field.Name]
+ if !ok {
+ // Note the error, but keep going.
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "no such struct field",
+ }
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ // Note the error, but keep going.
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "cannot set struct field",
+ }
+ continue
+ }
+ v := reflect.ValueOf(field.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ if meta == nil {
+ return err
+ }
+ for _, facet := range meta.Facets {
+ i, ok := s.codec.facetByName[facet.Name]
+ if !ok {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: "no matching field found",
+ }
+ }
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: "unable to set unexported field of struct",
+ }
+ }
+ continue
+ }
+ v := reflect.ValueOf(facet.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+ fields := make([]Field, 0, len(s.codec.fieldByName))
+ var facets []Facet
+ for i, tag := range s.codec.byIndex {
+ if tag.ignore {
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ continue
+ }
+ if tag.facet {
+ facets = append(facets, Facet{Name: tag.name, Value: f.Interface()})
+ } else {
+ fields = append(fields, Field{Name: tag.name, Value: f.Interface()})
+ }
+ }
+ return fields, &DocumentMetadata{Facets: facets}, nil
+}
+
+// newStructFLS returns a FieldLoadSaver for the struct pointer p.
+func newStructFLS(p interface{}) (FieldLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidDocumentType
+ }
+ codec, err := loadCodec(v.Elem().Type())
+ if err != nil {
+ return nil, err
+ }
+ return structFLS{v.Elem(), codec}, nil
+}
+
+func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error {
+ x, err := newStructFLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(f, meta)
+}
+
+func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) {
+ x, err := newStructFLS(src)
+ if err != nil {
+ return nil, nil, err
+ }
+ return x.Save()
+}
+
+// LoadStruct loads the fields from f to dst. dst must be a struct pointer.
+func LoadStruct(dst interface{}, f []Field) error {
+ return loadStructWithMeta(dst, f, nil)
+}
+
+// SaveStruct returns the fields from src as a slice of Field.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Field, error) {
+ f, _, err := saveStructWithMeta(src)
+ return f, err
+}
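As a usage sketch of the codec defined above, SaveStruct flattens a tagged struct into a []Field and LoadStruct reverses it. The struct and its tag values are illustrative; note that facet-tagged fields travel through DocumentMetadata rather than through these two helpers, so they are not round-tripped here.

package example

import (
	"fmt"

	"google.golang.org/appengine/search"
)

// Doc shows the tag forms handled by the codec: a renamed field,
// a facet, and an ignored field.
type Doc struct {
	Name    string
	Info    string  `search:"about"`
	Legs    float64 `search:",facet"`
	Ignored string  `search:"-"`
}

func roundTrip() error {
	src := &Doc{Name: "Gopher", Info: "Likes slide rules.", Legs: 4}

	// SaveStruct returns only the regular fields; Legs (a facet) and
	// Ignored are omitted.
	fields, err := search.SaveStruct(src)
	if err != nil {
		return err
	}

	dst := &Doc{}
	if err := search.LoadStruct(dst, fields); err != nil {
		return err
	}
	fmt.Printf("loaded %+v from %d fields\n", dst, len(fields))
	return nil
}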
diff --git a/vendor/google.golang.org/appengine/search/struct_test.go b/vendor/google.golang.org/appengine/search/struct_test.go
new file mode 100644
index 000000000..4e5b5d1b8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct_test.go
@@ -0,0 +1,213 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestLoadingStruct(t *testing.T) {
+ testCases := []struct {
+ desc string
+ fields []Field
+ meta *DocumentMetadata
+ want interface{}
+ wantErr bool
+ }{
+ {
+ desc: "Basic struct",
+ fields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "Legs", Value: float64(4)},
+ },
+ want: &struct {
+ Name string
+ Legs float64
+ }{"Gopher", 4},
+ },
+ {
+ desc: "Struct with tags",
+ fields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "about", Value: "Likes slide rules."},
+ },
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ {Name: "Fur", Value: Atom("furry")},
+ }},
+ want: &struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ },
+ {
+ desc: "Bad field from tag",
+ want: &struct {
+ AlphaBeta string `search:"αβ"`
+ }{},
+ wantErr: true,
+ },
+ {
+ desc: "Ignore missing field",
+ fields: []Field{
+ {Name: "Meaning", Value: float64(42)},
+ },
+ want: &struct{}{},
+ wantErr: true,
+ },
+ {
+ desc: "Ignore unsettable field",
+ fields: []Field{
+ {Name: "meaning", Value: float64(42)},
+ },
+ want: &struct{ meaning float64 }{}, // field not populated.
+ wantErr: true,
+ },
+ {
+ desc: "Error on missing facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "Missing", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ }{Atom("yes")},
+ wantErr: true,
+ },
+ {
+ desc: "Error on unsettable facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "unset", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ }{Atom("yes")},
+ wantErr: true,
+ },
+ {
+ desc: "Error setting ignored field",
+ fields: []Field{
+ {Name: "Set", Value: "yes"},
+ {Name: "Ignored", Value: "no"},
+ },
+ want: &struct {
+ Set string
+ Ignored string `search:"-"`
+ }{Set: "yes"},
+ wantErr: true,
+ },
+ {
+ desc: "Error setting ignored facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "Ignored", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ Ignored Atom `search:"-,facet"`
+ }{Set: Atom("yes")},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range testCases {
+ // Make a pointer to an empty version of what want points to.
+ dst := reflect.New(reflect.TypeOf(tt.want).Elem()).Interface()
+ err := loadStructWithMeta(dst, tt.fields, tt.meta)
+ if err != nil != tt.wantErr {
+ t.Errorf("%s: got err %v; want err %t", tt.desc, err, tt.wantErr)
+ continue
+ }
+ if !reflect.DeepEqual(dst, tt.want) {
+ t.Errorf("%s: doesn't match\ngot: %v\nwant: %v", tt.desc, dst, tt.want)
+ }
+ }
+}
+
+func TestSavingStruct(t *testing.T) {
+ testCases := []struct {
+ desc string
+ doc interface{}
+ wantFields []Field
+ wantFacets []Facet
+ }{
+ {
+ desc: "Basic struct",
+ doc: &struct {
+ Name string
+ Legs float64
+ }{"Gopher", 4},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ {
+ desc: "Struct with tags",
+ doc: &struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "about", Value: "Likes slide rules."},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ {Name: "Fur", Value: Atom("furry")},
+ },
+ },
+ {
+ desc: "Ignore unexported struct fields",
+ doc: &struct {
+ Name string
+ info string
+ Legs float64 `search:",facet"`
+ fuzz Atom `search:",facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ {
+ desc: "Ignore fields marked -",
+ doc: &struct {
+ Name string
+ Info string `search:"-"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"-,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ fields, meta, err := saveStructWithMeta(tt.doc)
+ if err != nil {
+ t.Errorf("%s: got err %v; want nil", tt.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(fields, tt.wantFields) {
+ t.Errorf("%s: fields don't match\ngot: %v\nwant: %v", tt.desc, fields, tt.wantFields)
+ }
+ if facets := meta.Facets; !reflect.DeepEqual(facets, tt.wantFacets) {
+ t.Errorf("%s: facets don't match\ngot: %v\nwant: %v", tt.desc, facets, tt.wantFacets)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go
new file mode 100644
index 000000000..3de46df82
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package socket provides outbound network sockets.
+//
+// This package is only required in the classic App Engine environment.
+// Applications running only in App Engine "flexible environment" should
+// use the standard library's net package.
+package socket
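A short sketch of dialing an outbound TCP connection with this package from the classic runtime; the target address is illustrative and the handler assumes a request-scoped context.

package example

import (
	"bufio"
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/socket"
)

// probe opens a raw TCP connection and echoes the first response line.
func probe(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	conn, err := socket.Dial(ctx, "tcp", "example.com:80") // address is illustrative
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer conn.Close()

	// *socket.Conn satisfies net.Conn, so it works with bufio and fmt.
	fmt.Fprintf(conn, "HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	fmt.Fprintln(w, status)
}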
diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go
new file mode 100644
index 000000000..0ad50e2d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_classic.go
@@ -0,0 +1,290 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package socket
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+
+ pb "google.golang.org/appengine/internal/socket"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ return DialTimeout(ctx, protocol, addr, 0)
+}
+
+var ipFamilies = []pb.CreateSocketRequest_SocketFamily{
+ pb.CreateSocketRequest_IPv4,
+ pb.CreateSocketRequest_IPv6,
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn.
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ dialCtx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ host, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err)
+ }
+
+ var prot pb.CreateSocketRequest_SocketProtocol
+ switch protocol {
+ case "tcp":
+ prot = pb.CreateSocketRequest_TCP
+ case "udp":
+ prot = pb.CreateSocketRequest_UDP
+ default:
+ return nil, fmt.Errorf("socket: unknown protocol %q", protocol)
+ }
+
+ packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ if len(packedAddrs) == 0 {
+ return nil, fmt.Errorf("no addresses for %q", host)
+ }
+
+ packedAddr := packedAddrs[0] // use first address
+ fam := pb.CreateSocketRequest_IPv4
+ if len(packedAddr) == net.IPv6len {
+ fam = pb.CreateSocketRequest_IPv6
+ }
+
+ req := &pb.CreateSocketRequest{
+ Family: fam.Enum(),
+ Protocol: prot.Enum(),
+ RemoteIp: &pb.AddressPort{
+ Port: proto.Int32(int32(port)),
+ PackedAddress: packedAddr,
+ },
+ }
+ if resolved {
+ req.RemoteIp.HostnameHint = &host
+ }
+ res := &pb.CreateSocketReply{}
+ if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil {
+ return nil, err
+ }
+
+ return &Conn{
+ ctx: ctx,
+ desc: res.GetSocketDescriptor(),
+ prot: prot,
+ local: res.ProxyExternalIp,
+ remote: req.RemoteIp,
+ }, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ packedAddrs, _, err := resolve(ctx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ addrs = make([]net.IP, len(packedAddrs))
+ for i, pa := range packedAddrs {
+ addrs[i] = net.IP(pa)
+ }
+ return addrs, nil
+}
+
+func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {
+ // Check if it's an IP address.
+ if ip := net.ParseIP(host); ip != nil {
+ if ip := ip.To4(); ip != nil {
+ return [][]byte{ip}, false, nil
+ }
+ return [][]byte{ip}, false, nil
+ }
+
+ req := &pb.ResolveRequest{
+ Name: &host,
+ AddressFamilies: fams,
+ }
+ res := &pb.ResolveReply{}
+ if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil {
+ // XXX: need to map to pb.ResolveReply_ErrorCode?
+ return nil, false, err
+ }
+ return res.PackedAddress, true, nil
+}
+
+// withDeadline is like context.WithDeadline, except it ignores the zero deadline.
+func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+ if deadline.IsZero() {
+ return parent, func() {}
+ }
+ return context.WithDeadline(parent, deadline)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ ctx context.Context
+ desc string
+ offset int64
+
+ prot pb.CreateSocketRequest_SocketProtocol
+ local, remote *pb.AddressPort
+
+ readDeadline, writeDeadline time.Time // optional
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ cn.ctx = ctx
+}
+
+func (cn *Conn) Read(b []byte) (n int, err error) {
+ const maxRead = 1 << 20
+ if len(b) > maxRead {
+ b = b[:maxRead]
+ }
+
+ req := &pb.ReceiveRequest{
+ SocketDescriptor: &cn.desc,
+ DataSize: proto.Int32(int32(len(b))),
+ }
+ res := &pb.ReceiveReply{}
+ if !cn.readDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.readDeadline)
+ defer cancel()
+ if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil {
+ return 0, err
+ }
+ if len(res.Data) == 0 {
+ return 0, io.EOF
+ }
+ if len(res.Data) > len(b) {
+ return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
+ }
+ return copy(b, res.Data), nil
+}
+
+func (cn *Conn) Write(b []byte) (n int, err error) {
+ const lim = 1 << 20 // max per chunk
+
+ for n < len(b) {
+ chunk := b[n:]
+ if len(chunk) > lim {
+ chunk = chunk[:lim]
+ }
+
+ req := &pb.SendRequest{
+ SocketDescriptor: &cn.desc,
+ Data: chunk,
+ StreamOffset: &cn.offset,
+ }
+ res := &pb.SendReply{}
+ if !cn.writeDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline)
+ defer cancel()
+ if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil {
+ // assume zero bytes were sent in this RPC
+ break
+ }
+ n += int(res.GetDataSent())
+ cn.offset += int64(res.GetDataSent())
+ }
+
+ return
+}
+
+func (cn *Conn) Close() error {
+ req := &pb.CloseRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.CloseReply{}
+ if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil {
+ return err
+ }
+ cn.desc = "CLOSED"
+ return nil
+}
+
+func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr {
+ if ap == nil {
+ return nil
+ }
+ switch prot {
+ case pb.CreateSocketRequest_TCP:
+ return &net.TCPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ case pb.CreateSocketRequest_UDP:
+ return &net.UDPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ }
+ panic("unknown protocol " + prot.String())
+}
+
+func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) }
+func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) }
+
+func (cn *Conn) SetDeadline(t time.Time) error {
+ cn.readDeadline = t
+ cn.writeDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetReadDeadline(t time.Time) error {
+ cn.readDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetWriteDeadline(t time.Time) error {
+ cn.writeDeadline = t
+ return nil
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ req := &pb.GetSocketNameRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.GetSocketNameReply{}
+ return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name)
+}
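Building on the classic implementation above, a sketch of the timeout-related calls it defines: DialTimeout bounds connection setup and name resolution, while SetReadDeadline bounds the Receive RPC issued by Read. Address and durations are illustrative.

package example

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine/socket"
)

// fetchBanner reads a single chunk with a dial timeout and a read deadline.
func fetchBanner(ctx context.Context) ([]byte, error) {
	conn, err := socket.DialTimeout(ctx, "tcp", "example.com:25", 10*time.Second)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// Bound the Receive RPC issued by the Read below.
	if err := conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return nil, err
	}
	buf := make([]byte, 512)
	n, err := conn.Read(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}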
diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go
new file mode 100644
index 000000000..c804169a1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_vm.go
@@ -0,0 +1,64 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package socket
+
+import (
+ "net"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ conn, err := net.Dial(protocol, addr)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ conn, err := net.DialTimeout(protocol, addr, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ return net.LookupIP(host)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ net.Conn
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ // This function is not required in App Engine "flexible environment".
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ // This function is not required in App Engine "flexible environment".
+ return nil
+}
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
new file mode 100644
index 000000000..965c5ab4c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
@@ -0,0 +1,541 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package taskqueue provides a client for App Engine's taskqueue service.
+Using this service, applications may perform work outside a user's request.
+
+A Task may be constructed manually; alternatively, since the most common
+taskqueue operation is to add a single POST task, NewPOSTTask makes it easy.
+
+ t := taskqueue.NewPOSTTask("/worker", url.Values{
+ "key": {key},
+ })
+ taskqueue.Add(c, t, "") // add t to the default queue
+*/
+package taskqueue // import "google.golang.org/appengine/taskqueue"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ dspb "google.golang.org/appengine/internal/datastore"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+var (
+ // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name.
+ ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions lets you control whether to retry a task and the backoff intervals between tries.
+type RetryOptions struct {
+ // Number of tries/leases after which the task fails permanently and is deleted.
+ // If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+ RetryLimit int32
+
+ // Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+ // If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+ AgeLimit time.Duration
+
+ // Minimum time between successive tries (only for push tasks).
+ MinBackoff time.Duration
+
+ // Maximum time between successive tries (only for push tasks).
+ MaxBackoff time.Duration
+
+ // Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+ MaxDoublings int32
+
+ // If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+ // Otherwise a zero MaxDoublings is ignored and the default is used.
+ ApplyZeroMaxDoublings bool
+}
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+ params := &pb.TaskQueueRetryParameters{}
+ if opt.RetryLimit > 0 {
+ params.RetryLimit = proto.Int32(opt.RetryLimit)
+ }
+ if opt.AgeLimit > 0 {
+ params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+ }
+ if opt.MinBackoff > 0 {
+ params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+ }
+ if opt.MaxBackoff > 0 {
+ params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+ }
+ if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+ params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+ }
+ return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+ // Path is the worker URL for the task.
+ // If unset, it will default to /_ah/queue/<queue_name>.
+ Path string
+
+ // Payload is the data for the task.
+ // This will be delivered as the HTTP request body.
+ // It is only used when Method is POST, PUT or PULL.
+ // url.Values' Encode method may be used to generate this for POST requests.
+ Payload []byte
+
+ // Additional HTTP headers to pass at the task's execution time.
+ // To schedule the task to be run with an alternate app version
+ // or backend, set the "Host" header.
+ Header http.Header
+
+ // Method is the HTTP method for the task ("GET", "POST", etc.),
+ // or "PULL" if this is task is destined for a pull-based queue.
+ // If empty, this defaults to "POST".
+ Method string
+
+ // A name for the task.
+ // If empty, a name will be chosen.
+ Name string
+
+ // Delay specifies the duration the task queue service must wait
+ // before executing the task.
+ // Either Delay or ETA may be set, but not both.
+ Delay time.Duration
+
+ // ETA specifies the earliest time a task may be executed (push queues)
+ // or leased (pull queues).
+ // Either Delay or ETA may be set, but not both.
+ ETA time.Time
+
+ // The number of times the task has been dispatched or leased.
+ RetryCount int32
+
+ // Tag for the task. Only used when Method is PULL.
+ Tag string
+
+ // Retry options for this task. May be nil.
+ RetryOptions *RetryOptions
+}
+
+func (t *Task) method() string {
+ if t.Method == "" {
+ return "POST"
+ }
+ return t.Method
+}
+
+// NewPOSTTask creates a Task that will POST to a path with the given form data.
+func NewPOSTTask(path string, params url.Values) *Task {
+ h := make(http.Header)
+ h.Set("Content-Type", "application/x-www-form-urlencoded")
+ return &Task{
+ Path: path,
+ Payload: []byte(params.Encode()),
+ Header: h,
+ Method: "POST",
+ }
+}
+
+// RequestHeaders are the special HTTP request headers available to push task
+// HTTP request handlers. These headers are set internally by App Engine.
+// See https://cloud.google.com/appengine/docs/standard/go/taskqueue/push/creating-handlers#reading_request_headers
+// for a description of the fields.
+type RequestHeaders struct {
+ QueueName string
+ TaskName string
+ TaskRetryCount int64
+ TaskExecutionCount int64
+ TaskETA time.Time
+
+ TaskPreviousResponse int
+ TaskRetryReason string
+ FailFast bool
+}
+
+// ParseRequestHeaders parses the special HTTP request headers available to push
+// task request handlers. This function silently ignores values of the wrong
+// format.
+func ParseRequestHeaders(h http.Header) *RequestHeaders {
+ ret := &RequestHeaders{
+ QueueName: h.Get("X-AppEngine-QueueName"),
+ TaskName: h.Get("X-AppEngine-TaskName"),
+ }
+
+ ret.TaskRetryCount, _ = strconv.ParseInt(h.Get("X-AppEngine-TaskRetryCount"), 10, 64)
+ ret.TaskExecutionCount, _ = strconv.ParseInt(h.Get("X-AppEngine-TaskExecutionCount"), 10, 64)
+
+ etaSecs, _ := strconv.ParseInt(h.Get("X-AppEngine-TaskETA"), 10, 64)
+ if etaSecs != 0 {
+ ret.TaskETA = time.Unix(etaSecs, 0)
+ }
+
+ ret.TaskPreviousResponse, _ = strconv.Atoi(h.Get("X-AppEngine-TaskPreviousResponse"))
+ ret.TaskRetryReason = h.Get("X-AppEngine-TaskRetryReason")
+ if h.Get("X-AppEngine-FailFast") != "" {
+ ret.FailFast = true
+ }
+
+ return ret
+}
+
+var (
+ currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace")
+)
+
+func getDefaultNamespace(ctx context.Context) string {
+ return internal.IncomingHeaders(ctx).Get(defaultNamespace)
+}
+
+func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ path := task.Path
+ if path == "" {
+ path = "/_ah/queue/" + queueName
+ }
+ eta := task.ETA
+ if eta.IsZero() {
+ eta = time.Now().Add(task.Delay)
+ } else if task.Delay != 0 {
+ panic("taskqueue: both Delay and ETA are set")
+ }
+ req := &pb.TaskQueueAddRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(eta.UnixNano() / 1e3),
+ }
+ method := task.method()
+ if method == "PULL" {
+ // Pull-based task
+ req.Body = task.Payload
+ req.Mode = pb.TaskQueueMode_PULL.Enum()
+ if task.Tag != "" {
+ req.Tag = []byte(task.Tag)
+ }
+ } else {
+ // HTTP-based task
+ if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok {
+ req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum()
+ } else {
+ return nil, fmt.Errorf("taskqueue: bad method %q", method)
+ }
+ req.Url = []byte(path)
+ for k, vs := range task.Header {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(k),
+ Value: []byte(v),
+ })
+ }
+ }
+ if method == "POST" || method == "PUT" {
+ req.Body = task.Payload
+ }
+
+ // Namespace headers.
+ if _, ok := task.Header[currentNamespace]; !ok {
+ // Fetch the current namespace of this request.
+ ns := internal.NamespaceFromContext(c)
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(currentNamespace),
+ Value: []byte(ns),
+ })
+ }
+ if _, ok := task.Header[defaultNamespace]; !ok {
+ // Fetch the X-AppEngine-Default-Namespace header of this request.
+ if ns := getDefaultNamespace(c); ns != "" {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(defaultNamespace),
+ Value: []byte(ns),
+ })
+ }
+ }
+ }
+
+ if task.RetryOptions != nil {
+ req.RetryParameters = task.RetryOptions.toRetryParameters()
+ }
+
+ return req, nil
+}
+
+var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{
+ pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true,
+ pb.TaskQueueServiceError_TOMBSTONED_TASK: true,
+}
+
+// Add adds the task to a named queue.
+// An empty queue name means that the default queue will be used.
+// Add returns an equivalent Task with defaults filled in, including setting
+// the task's Name field to the chosen name if the original was empty.
+func Add(c context.Context, task *Task, queueName string) (*Task, error) {
+ req, err := newAddReq(c, task, queueName)
+ if err != nil {
+ return nil, err
+ }
+ res := &pb.TaskQueueAddResponse{}
+ if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil {
+ apiErr, ok := err.(*internal.APIError)
+ if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] {
+ return nil, ErrTaskAlreadyAdded
+ }
+ return nil, err
+ }
+ resultTask := *task
+ resultTask.Method = task.method()
+ if task.Name == "" {
+ resultTask.Name = string(res.ChosenTaskName)
+ }
+ return &resultTask, nil
+}
+
+// AddMulti adds multiple tasks to a named queue.
+// An empty queue name means that the default queue will be used.
+// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting
+// each task's Name field to the chosen name if the original was empty.
+// If a given task is badly formed or could not be added, an appengine.MultiError is returned.
+func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) {
+ req := &pb.TaskQueueBulkAddRequest{
+ AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)),
+ }
+ me, any := make(appengine.MultiError, len(tasks)), false
+ for i, t := range tasks {
+ req.AddRequest[i], me[i] = newAddReq(c, t, queueName)
+ any = any || me[i] != nil
+ }
+ if any {
+ return nil, me
+ }
+ res := &pb.TaskQueueBulkAddResponse{}
+ if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil {
+ return nil, err
+ }
+ if len(res.Taskresult) != len(tasks) {
+ return nil, errors.New("taskqueue: server error")
+ }
+ tasksOut := make([]*Task, len(tasks))
+ for i, tr := range res.Taskresult {
+ tasksOut[i] = new(Task)
+ *tasksOut[i] = *tasks[i]
+ tasksOut[i].Method = tasksOut[i].method()
+ if tasksOut[i].Name == "" {
+ tasksOut[i].Name = string(tr.ChosenTaskName)
+ }
+ if *tr.Result != pb.TaskQueueServiceError_OK {
+ if alreadyAddedErrors[*tr.Result] {
+ me[i] = ErrTaskAlreadyAdded
+ } else {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(*tr.Result),
+ }
+ }
+ any = true
+ }
+ }
+ if any {
+ return tasksOut, me
+ }
+ return tasksOut, nil
+}
+
+// Delete deletes a task from a named queue.
+func Delete(c context.Context, task *Task, queueName string) error {
+ err := DeleteMulti(c, []*Task{task}, queueName)
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti deletes multiple tasks from a named queue.
+// If a given task could not be deleted, an appengine.MultiError is returned.
+// Each task is deleted independently; one may fail to delete while the others
+// are successfully deleted.
+func DeleteMulti(c context.Context, tasks []*Task, queueName string) error {
+ taskNames := make([][]byte, len(tasks))
+ for i, t := range tasks {
+ taskNames[i] = []byte(t.Name)
+ }
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueDeleteRequest{
+ QueueName: []byte(queueName),
+ TaskName: taskNames,
+ }
+ res := &pb.TaskQueueDeleteResponse{}
+ if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil {
+ return err
+ }
+ if a, b := len(req.TaskName), len(res.Result); a != b {
+ return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b)
+ }
+ me, any := make(appengine.MultiError, len(res.Result)), false
+ for i, ec := range res.Result {
+ if ec != pb.TaskQueueServiceError_OK {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(ec),
+ }
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueQueryAndOwnTasksRequest{
+ QueueName: []byte(queueName),
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ MaxTasks: proto.Int64(int64(maxTasks)),
+ GroupByTag: proto.Bool(groupByTag),
+ Tag: tag,
+ }
+ res := &pb.TaskQueueQueryAndOwnTasksResponse{}
+ if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil {
+ return nil, err
+ }
+ tasks := make([]*Task, len(res.Task))
+ for i, t := range res.Task {
+ tasks[i] = &Task{
+ Payload: t.Body,
+ Name: string(t.TaskName),
+ Method: "PULL",
+ ETA: time.Unix(0, *t.EtaUsec*1e3),
+ RetryCount: *t.RetryCount,
+ Tag: string(t.Tag),
+ }
+ }
+ return tasks, nil
+}
+
+// Lease leases tasks from a queue.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, false, nil)
+}
+
+// LeaseByTag leases tasks from a queue, grouped by tag.
+// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag))
+}
+
+// Purge removes all tasks from a queue.
+func Purge(c context.Context, queueName string) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueuePurgeQueueRequest{
+ QueueName: []byte(queueName),
+ }
+ res := &pb.TaskQueuePurgeQueueResponse{}
+ return internal.Call(c, "taskqueue", "PurgeQueue", req, res)
+}
+
+// ModifyLease modifies the lease of a task.
+// Used to request more processing time, or to abandon processing.
+// leaseTime is in seconds and must not be negative.
+func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueModifyTaskLeaseRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ }
+ res := &pb.TaskQueueModifyTaskLeaseResponse{}
+ if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil {
+ return err
+ }
+ task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)
+ return nil
+}
+
+// QueueStatistics represents statistics about a single task queue.
+type QueueStatistics struct {
+ Tasks int // may be an approximation
+ OldestETA time.Time // zero if there are no pending tasks
+
+ Executed1Minute int // tasks executed in the last minute
+ InFlight int // tasks executing now
+ EnforcedRate float64 // requests per second
+}
+
+// QueueStats retrieves statistics about queues.
+func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) {
+ req := &pb.TaskQueueFetchQueueStatsRequest{
+ QueueName: make([][]byte, len(queueNames)),
+ }
+ for i, q := range queueNames {
+ if q == "" {
+ q = "default"
+ }
+ req.QueueName[i] = []byte(q)
+ }
+ res := &pb.TaskQueueFetchQueueStatsResponse{}
+ if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil {
+ return nil, err
+ }
+ qs := make([]QueueStatistics, len(res.Queuestats))
+ for i, qsg := range res.Queuestats {
+ qs[i] = QueueStatistics{
+ Tasks: int(*qsg.NumTasks),
+ }
+ if eta := *qsg.OldestEtaUsec; eta > -1 {
+ qs[i].OldestETA = time.Unix(0, eta*1e3)
+ }
+ if si := qsg.ScannerInfo; si != nil {
+ qs[i].Executed1Minute = int(*si.ExecutedLastMinute)
+ qs[i].InFlight = int(si.GetRequestsInFlight())
+ qs[i].EnforcedRate = si.GetEnforcedRate()
+ }
+ }
+ return qs, nil
+}
+
+func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) {
+ x.Transaction = t
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name)
+
+ // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue.
+ dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT)
+ internal.RegisterTimeoutErrorCode("taskqueue", dsCode)
+
+ // Transaction registration.
+ internal.RegisterTransactionSetter(setTransaction)
+ internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) {
+ for _, req := range x.AddRequest {
+ setTransaction(req, t)
+ }
+ })
+}
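The package doc above already shows the push-queue path via NewPOSTTask and Add; the sketch below shows the pull-queue side using Lease, ModifyLease, and DeleteMulti. The queue name is illustrative and assumes a pull queue configured in queue.yaml; handle is a placeholder for the caller's processing function.

package example

import (
	"golang.org/x/net/context"
	"google.golang.org/appengine/taskqueue"
)

// drainPullQueue leases up to 100 tasks for 60 seconds, processes them,
// and deletes the ones that succeeded.
func drainPullQueue(ctx context.Context, handle func([]byte) error) error {
	tasks, err := taskqueue.Lease(ctx, 100, "pull-queue", 60)
	if err != nil {
		return err
	}
	var done []*taskqueue.Task
	for _, t := range tasks {
		if err := handle(t.Payload); err != nil {
			// Abandon the lease so another worker can retry sooner.
			taskqueue.ModifyLease(ctx, t, "pull-queue", 0)
			continue
		}
		done = append(done, t)
	}
	if len(done) == 0 {
		return nil
	}
	return taskqueue.DeleteMulti(ctx, done, "pull-queue")
}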
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go
new file mode 100644
index 000000000..d9eec50b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go
@@ -0,0 +1,173 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package taskqueue
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+func TestAddErrors(t *testing.T) {
+ var tests = []struct {
+ err, want error
+ sameErr bool // if true, should return err exactly
+ }{
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_TASK_ALREADY_EXISTS),
+ },
+ want: ErrTaskAlreadyAdded,
+ },
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_TOMBSTONED_TASK),
+ },
+ want: ErrTaskAlreadyAdded,
+ },
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_UNKNOWN_QUEUE),
+ },
+ want: errors.New("not used"),
+ sameErr: true,
+ },
+ }
+ for _, tc := range tests {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+ // don't fill in any of the response
+ return tc.err
+ })
+ task := &Task{Path: "/worker", Method: "PULL"}
+ _, err := Add(c, task, "a-queue")
+ want := tc.want
+ if tc.sameErr {
+ want = tc.err
+ }
+ if err != want {
+ t.Errorf("Add with tc.err = %v, got %#v, want = %#v", tc.err, err, want)
+ }
+ }
+}
+
+func TestAddMulti(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "BulkAdd", func(req *pb.TaskQueueBulkAddRequest, res *pb.TaskQueueBulkAddResponse) error {
+ res.Taskresult = []*pb.TaskQueueBulkAddResponse_TaskResult{
+ {
+ Result: pb.TaskQueueServiceError_OK.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_TASK_ALREADY_EXISTS.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_TOMBSTONED_TASK.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_INTERNAL_ERROR.Enum(),
+ },
+ }
+ return nil
+ })
+ tasks := []*Task{
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ }
+ r, err := AddMulti(c, tasks, "a-queue")
+ if len(r) != len(tasks) {
+ t.Fatalf("AddMulti returned %d tasks, want %d", len(r), len(tasks))
+ }
+ want := appengine.MultiError{
+ nil,
+ ErrTaskAlreadyAdded,
+ ErrTaskAlreadyAdded,
+ &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_INTERNAL_ERROR),
+ },
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("AddMulti got %v, wanted %v", err, want)
+ }
+}
+
+func TestAddWithEmptyPath(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+ if got, want := string(req.Url), "/_ah/queue/a-queue"; got != want {
+ return fmt.Errorf("req.Url = %q; want %q", got, want)
+ }
+ return nil
+ })
+ if _, err := Add(c, &Task{}, "a-queue"); err != nil {
+ t.Fatalf("Add: %v", err)
+ }
+}
+
+func TestParseRequestHeaders(t *testing.T) {
+ tests := []struct {
+ Header http.Header
+ Want RequestHeaders
+ }{
+ {
+ Header: map[string][]string{
+ "X-Appengine-Queuename": []string{"foo"},
+ "X-Appengine-Taskname": []string{"bar"},
+ "X-Appengine-Taskretrycount": []string{"4294967297"}, // 2^32 + 1
+ "X-Appengine-Taskexecutioncount": []string{"4294967298"}, // 2^32 + 2
+ "X-Appengine-Tasketa": []string{"1500000000"},
+ "X-Appengine-Taskpreviousresponse": []string{"404"},
+ "X-Appengine-Taskretryreason": []string{"baz"},
+ "X-Appengine-Failfast": []string{"yes"},
+ },
+ Want: RequestHeaders{
+ QueueName: "foo",
+ TaskName: "bar",
+ TaskRetryCount: 4294967297,
+ TaskExecutionCount: 4294967298,
+ TaskETA: time.Date(2017, time.July, 14, 2, 40, 0, 0, time.UTC),
+ TaskPreviousResponse: 404,
+ TaskRetryReason: "baz",
+ FailFast: true,
+ },
+ },
+ {
+ Header: map[string][]string{},
+ Want: RequestHeaders{
+ QueueName: "",
+ TaskName: "",
+ TaskRetryCount: 0,
+ TaskExecutionCount: 0,
+ TaskETA: time.Time{},
+ TaskPreviousResponse: 0,
+ TaskRetryReason: "",
+ FailFast: false,
+ },
+ },
+ }
+
+ for idx, test := range tests {
+ got := *ParseRequestHeaders(test.Header)
+ if got.TaskETA.UnixNano() != test.Want.TaskETA.UnixNano() {
+ t.Errorf("%d. ParseRequestHeaders got TaskETA %v, wanted %v", idx, got.TaskETA, test.Want.TaskETA)
+ }
+ got.TaskETA = time.Time{}
+ test.Want.TaskETA = time.Time{}
+ if !reflect.DeepEqual(got, test.Want) {
+ t.Errorf("%d. ParseRequestHeaders got %v, wanted %v", idx, got, test.Want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 000000000..05642a992
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+ if err == context.DeadlineExceeded {
+ return true
+ }
+ if t, ok := err.(interface {
+ IsTimeout() bool
+ }); ok {
+ return t.IsTimeout()
+ }
+ return false
+}
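A minimal sketch of using IsTimeoutError to decide whether a failed API call is worth retrying; op stands in for any call that can return an App Engine API error.

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
)

// callWithRetry retries op once if its error is classified as a timeout
// by appengine.IsTimeoutError. Real code would add backoff and a retry budget.
func callWithRetry(ctx context.Context, op func(context.Context) error) error {
	err := op(ctx)
	if err != nil && appengine.IsTimeoutError(err) {
		err = op(ctx)
	}
	return err
}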
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 000000000..6ffe1e6d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
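A usage sketch of the Client helper above: outbound requests go through the urlfetch transport, and any deadline on the context becomes the fetch deadline (otherwise the 5-second default applies). The URL and timeout are illustrative.

package example

import (
	"io/ioutil"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine"
	"google.golang.org/appengine/urlfetch"
)

// fetch issues an outbound GET through the urlfetch transport.
func fetch(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	client := urlfetch.Client(ctx)
	resp, err := client.Get("https://example.com/") // URL is illustrative
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	w.Write(body)
}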
diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go
new file mode 100644
index 000000000..ffad57182
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/oauth.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// CurrentOAuth returns the user associated with the OAuth consumer making this
+// request. If the OAuth consumer did not make a valid OAuth request, or
+// scopes is non-empty and the current user does not have at least one of the
+// scopes, this method will return an error.
+func CurrentOAuth(c context.Context, scopes ...string) (*User, error) {
+ req := &pb.GetOAuthUserRequest{}
+ if len(scopes) != 1 || scopes[0] != "" {
+ // The signature for this function used to be CurrentOAuth(Context, string).
+ // Ignore the singular "" scope to preserve existing behavior.
+ req.Scopes = scopes
+ }
+
+ res := &pb.GetOAuthUserResponse{}
+
+ err := internal.Call(c, "user", "GetOAuthUser", req, res)
+ if err != nil {
+ return nil, err
+ }
+ return &User{
+ Email: *res.Email,
+ AuthDomain: *res.AuthDomain,
+ Admin: res.GetIsAdmin(),
+ ID: *res.UserId,
+ ClientID: res.GetClientId(),
+ }, nil
+}
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c context.Context) (string, error) {
+ req := &pb.CheckOAuthSignatureRequest{}
+ res := &pb.CheckOAuthSignatureResponse{}
+
+ err := internal.Call(c, "user", "CheckOAuthSignature", req, res)
+ if err != nil {
+ return "", err
+ }
+ return *res.OauthConsumerKey, err
+}
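A sketch of authenticating an incoming API call with CurrentOAuth; the scope string is illustrative and should be replaced with whatever scopes the API actually requires.

package example

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/user"
)

// apiHandler authorizes the caller via the OAuth service.
func apiHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	u, err := user.CurrentOAuth(ctx, "https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		http.Error(w, "OAuth authorization required", http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(w, "authorized as %s (admin=%v)\n", u.Email, u.Admin)
}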
diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go
new file mode 100644
index 000000000..eb76f59b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user // import "google.golang.org/appengine/user"
+
+import (
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// User represents a user of the application.
+type User struct {
+ Email string
+ AuthDomain string
+ Admin bool
+
+ // ID is the unique permanent ID of the user.
+ // It is populated if the Email is associated
+ // with a Google account, or empty otherwise.
+ ID string
+
+ // ClientID is the ID of the pre-registered client so its identity can be verified.
+ // See https://developers.google.com/console/help/#generatingoauth2 for more information.
+ ClientID string
+
+ FederatedIdentity string
+ FederatedProvider string
+}
+
+// String returns a displayable name for the user.
+func (u *User) String() string {
+ if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) {
+ return u.Email[:len(u.Email)-len("@"+u.AuthDomain)]
+ }
+ if u.FederatedIdentity != "" {
+ return u.FederatedIdentity
+ }
+ return u.Email
+}
+
+// LoginURL returns a URL that, when visited, prompts the user to sign in,
+// then redirects the user to the URL specified by dest.
+func LoginURL(c context.Context, dest string) (string, error) {
+ return LoginURLFederated(c, dest, "")
+}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) {
+ req := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ if identity != "" {
+ req.FederatedIdentity = proto.String(identity)
+ }
+ res := &pb.CreateLoginURLResponse{}
+ if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LoginUrl, nil
+}
+
+// LogoutURL returns a URL that, when visited, signs the user out,
+// then redirects the user to the URL specified by dest.
+func LogoutURL(c context.Context, dest string) (string, error) {
+ req := &pb.CreateLogoutURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ res := &pb.CreateLogoutURLResponse{}
+ if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LogoutUrl, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name)
+}
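A sketch of the usual sign-in flow built from Current and LoginURL: greet the user if they are signed in, otherwise redirect them to the sign-in page and back to the current URL.

package example

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/user"
)

// welcome greets a signed-in user or redirects to the sign-in page.
func welcome(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	u := user.Current(ctx)
	if u == nil {
		url, err := user.LoginURL(ctx, r.URL.String())
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, url, http.StatusFound)
		return
	}
	fmt.Fprintf(w, "Hello, %s!\n", u) // uses User.String for the display name
}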
diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go
new file mode 100644
index 000000000..81315094c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_classic.go
@@ -0,0 +1,44 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package user
+
+import (
+ "appengine/user"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func Current(ctx context.Context) *User {
+ c, err := internal.ClassicContextFromContext(ctx)
+ if err != nil {
+ panic(err)
+ }
+ u := user.Current(c)
+ if u == nil {
+ return nil
+ }
+ // Map appengine/user.User to this package's User type.
+ return &User{
+ Email: u.Email,
+ AuthDomain: u.AuthDomain,
+ Admin: u.Admin,
+ ID: u.ID,
+ FederatedIdentity: u.FederatedIdentity,
+ FederatedProvider: u.FederatedProvider,
+ }
+}
+
+func IsAdmin(ctx context.Context) bool {
+ c, err := internal.ClassicContextFromContext(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ return user.IsAdmin(c)
+}
diff --git a/vendor/google.golang.org/appengine/user/user_test.go b/vendor/google.golang.org/appengine/user/user_test.go
new file mode 100644
index 000000000..5fc5957a8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_test.go
@@ -0,0 +1,99 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+func baseReq() *http.Request {
+ return &http.Request{
+ Header: http.Header{},
+ }
+}
+
+type basicUserTest struct {
+ nickname, email, authDomain, admin string
+ // expectations
+ isNil, isAdmin bool
+ displayName string
+}
+
+var basicUserTests = []basicUserTest{
+ {"", "", "", "0", true, false, ""},
+ {"ken", "ken@example.com", "example.com", "0", false, false, "ken"},
+ {"ken", "ken@example.com", "auth_domain.com", "1", false, true, "ken@example.com"},
+}
+
+func TestBasicUserAPI(t *testing.T) {
+ for i, tc := range basicUserTests {
+ req := baseReq()
+ req.Header.Set("X-AppEngine-User-Nickname", tc.nickname)
+ req.Header.Set("X-AppEngine-User-Email", tc.email)
+ req.Header.Set("X-AppEngine-Auth-Domain", tc.authDomain)
+ req.Header.Set("X-AppEngine-User-Is-Admin", tc.admin)
+
+ c := internal.ContextForTesting(req)
+
+ if ga := IsAdmin(c); ga != tc.isAdmin {
+ t.Errorf("test %d: expected IsAdmin(c) = %v, got %v", i, tc.isAdmin, ga)
+ }
+
+ u := Current(c)
+ if tc.isNil {
+ if u != nil {
+ t.Errorf("test %d: expected u == nil, got %+v", i, u)
+ }
+ continue
+ }
+ if u == nil {
+ t.Errorf("test %d: expected u != nil, got nil", i)
+ continue
+ }
+ if u.Email != tc.email {
+ t.Errorf("test %d: expected u.Email = %q, got %q", i, tc.email, u.Email)
+ }
+ if gs := u.String(); gs != tc.displayName {
+ t.Errorf("test %d: expected u.String() = %q, got %q", i, tc.displayName, gs)
+ }
+ if u.Admin != tc.isAdmin {
+ t.Errorf("test %d: expected u.Admin = %v, got %v", i, tc.isAdmin, u.Admin)
+ }
+ }
+}
+
+func TestLoginURL(t *testing.T) {
+ expectedQuery := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String("/destination"),
+ }
+ const expectedDest = "/redir/dest"
+ c := aetesting.FakeSingleContext(t, "user", "CreateLoginURL", func(req *pb.CreateLoginURLRequest, res *pb.CreateLoginURLResponse) error {
+ if !proto.Equal(req, expectedQuery) {
+ return fmt.Errorf("got %v, want %v", req, expectedQuery)
+ }
+ res.LoginUrl = proto.String(expectedDest)
+ return nil
+ })
+
+ url, err := LoginURL(c, "/destination")
+ if err != nil {
+ t.Fatalf("LoginURL failed: %v", err)
+ }
+ if url != expectedDest {
+ t.Errorf("got %v, want %v", url, expectedDest)
+ }
+}
+
+// TODO(dsymonds): Add test for LogoutURL.
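A sketch of the test that TODO asks for, mirroring TestLoginURL above; it is not part of the vendored file, and the expected destination value is an assumption carried over from that test.

	func TestLogoutURL(t *testing.T) {
		expectedQuery := &pb.CreateLogoutURLRequest{
			DestinationUrl: proto.String("/destination"),
		}
		const expectedDest = "/redir/dest"
		c := aetesting.FakeSingleContext(t, "user", "CreateLogoutURL", func(req *pb.CreateLogoutURLRequest, res *pb.CreateLogoutURLResponse) error {
			if !proto.Equal(req, expectedQuery) {
				return fmt.Errorf("got %v, want %v", req, expectedQuery)
			}
			res.LogoutUrl = proto.String(expectedDest)
			return nil
		})

		url, err := LogoutURL(c, "/destination")
		if err != nil {
			t.Fatalf("LogoutURL failed: %v", err)
		}
		if url != expectedDest {
			t.Errorf("got %v, want %v", url, expectedDest)
		}
	}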
diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go
new file mode 100644
index 000000000..8dc672e92
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_vm.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.
+func Current(c context.Context) *User {
+ h := internal.IncomingHeaders(c)
+ u := &User{
+ Email: h.Get("X-AppEngine-User-Email"),
+ AuthDomain: h.Get("X-AppEngine-Auth-Domain"),
+ ID: h.Get("X-AppEngine-User-Id"),
+ Admin: h.Get("X-AppEngine-User-Is-Admin") == "1",
+ FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"),
+ FederatedProvider: h.Get("X-AppEngine-Federated-Provider"),
+ }
+ if u.Email == "" && u.FederatedIdentity == "" {
+ return nil
+ }
+ return u
+}
+
+// IsAdmin returns true if the current user is signed in and
+// is currently registered as an administrator of the application.
+func IsAdmin(c context.Context) bool {
+ h := internal.IncomingHeaders(c)
+ return h.Get("X-AppEngine-User-Is-Admin") == "1"
+}
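A hedged sketch of guarding a handler with IsAdmin; the adminOnly wrapper and its wiring are illustrative assumptions, while appengine.NewContext and user.IsAdmin come from this diff.

	package app

	import (
		"net/http"

		"google.golang.org/appengine"
		"google.golang.org/appengine/user"
	)

	// adminOnly rejects requests unless the signed-in user is an application admin.
	func adminOnly(h http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			c := appengine.NewContext(r)
			if !user.IsAdmin(c) {
				http.Error(w, "admins only", http.StatusForbidden)
				return
			}
			h(w, r)
		}
	}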
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp.go b/vendor/google.golang.org/appengine/xmpp/xmpp.go
new file mode 100644
index 000000000..3a561fd53
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp.go
@@ -0,0 +1,253 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package xmpp provides the means to send and receive instant messages
+to and from users of XMPP-compatible services.
+
+To send a message,
+ m := &xmpp.Message{
+ To: []string{"kaylee@example.com"},
+ Body: `Hi! How's the carrot?`,
+ }
+ err := m.Send(c)
+
+To receive messages,
+ func init() {
+ xmpp.Handle(handleChat)
+ }
+
+ func handleChat(c context.Context, m *xmpp.Message) {
+ // ...
+ }
+*/
+package xmpp // import "google.golang.org/appengine/xmpp"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+// Message represents an incoming chat message.
+type Message struct {
+ // Sender is the JID of the sender.
+ // Optional for outgoing messages.
+ Sender string
+
+ // To lists the intended recipients of the message.
+ // Incoming messages will have exactly one element.
+ To []string
+
+ // Body is the body of the message.
+ Body string
+
+ // Type is the message type, per RFC 3921.
+ // It defaults to "chat".
+ Type string
+
+ // RawXML is whether the body contains raw XML.
+ RawXML bool
+}
+
+// Presence represents an outgoing presence update.
+type Presence struct {
+ // Sender is the JID (optional).
+ Sender string
+
+ // To is the intended recipient of the presence update.
+ To string
+
+ // Type, per RFC 3921 (optional). Defaults to "available".
+ Type string
+
+ // State of presence (optional).
+ // Valid values: "away", "chat", "xa", "dnd" (RFC 3921).
+ State string
+
+ // Free text status message (optional).
+ Status string
+}
+
+var (
+ ErrPresenceUnavailable = errors.New("xmpp: presence unavailable")
+ ErrInvalidJID = errors.New("xmpp: invalid JID")
+)
+
+// Handle arranges for f to be called for incoming XMPP messages.
+// Only messages of type "chat" or "normal" will be handled.
+func Handle(f func(c context.Context, m *Message)) {
+ http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) {
+ f(appengine.NewContext(r), &Message{
+ Sender: r.FormValue("from"),
+ To: []string{r.FormValue("to")},
+ Body: r.FormValue("body"),
+ })
+ })
+}
+
+// Send sends a message.
+// If any failures occur with specific recipients, the error will be an appengine.MultiError.
+func (m *Message) Send(c context.Context) error {
+ req := &pb.XmppMessageRequest{
+ Jid: m.To,
+ Body: &m.Body,
+ RawXml: &m.RawXML,
+ }
+ if m.Type != "" && m.Type != "chat" {
+ req.Type = &m.Type
+ }
+ if m.Sender != "" {
+ req.FromJid = &m.Sender
+ }
+ res := &pb.XmppMessageResponse{}
+ if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil {
+ return err
+ }
+
+ if len(res.Status) != len(req.Jid) {
+ return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status))
+ }
+ me, any := make(appengine.MultiError, len(req.Jid)), false
+ for i, st := range res.Status {
+ if st != pb.XmppMessageResponse_NO_ERROR {
+ me[i] = errors.New(st.String())
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Invite sends an invitation. If the from address is an empty string,
+// the default (yourapp@appspot.com/bot) will be used.
+func Invite(c context.Context, to, from string) error {
+ req := &pb.XmppInviteRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.XmppInviteResponse{}
+ return internal.Call(c, "xmpp", "SendInvite", req, res)
+}
+
+// Send sends a presence update.
+func (p *Presence) Send(c context.Context) error {
+ req := &pb.XmppSendPresenceRequest{
+ Jid: &p.To,
+ }
+ if p.State != "" {
+ req.Show = &p.State
+ }
+ if p.Type != "" {
+ req.Type = &p.Type
+ }
+ if p.Sender != "" {
+ req.FromJid = &p.Sender
+ }
+ if p.Status != "" {
+ req.Status = &p.Status
+ }
+ res := &pb.XmppSendPresenceResponse{}
+ return internal.Call(c, "xmpp", "SendPresence", req, res)
+}
+
+var presenceMap = map[pb.PresenceResponse_SHOW]string{
+ pb.PresenceResponse_NORMAL: "",
+ pb.PresenceResponse_AWAY: "away",
+ pb.PresenceResponse_DO_NOT_DISTURB: "dnd",
+ pb.PresenceResponse_CHAT: "chat",
+ pb.PresenceResponse_EXTENDED_AWAY: "xa",
+}
+
+// GetPresence retrieves a user's presence.
+// If the from address is an empty string, the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// ErrPresenceUnavailable is returned if the presence is unavailable.
+func GetPresence(c context.Context, to string, from string) (string, error) {
+ req := &pb.PresenceRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.PresenceResponse{}
+ if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil {
+ return "", err
+ }
+ if !*res.IsAvailable || res.Presence == nil {
+ return "", ErrPresenceUnavailable
+ }
+ presence, ok := presenceMap[*res.Presence]
+ if ok {
+ return presence, nil
+ }
+ return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence)
+}
+
+// GetPresenceMulti retrieves multiple users' presence.
+// If the from address is an empty string, the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// If any presence is unavailable, an appengine.MultiError is returned.
+func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) {
+ req := &pb.BulkPresenceRequest{
+ Jid: to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.BulkPresenceResponse{}
+
+ if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil {
+ return nil, err
+ }
+
+ presences := make([]string, 0, len(res.PresenceResponse))
+ errs := appengine.MultiError{}
+
+ addResult := func(presence string, err error) {
+ presences = append(presences, presence)
+ errs = append(errs, err)
+ }
+
+ anyErr := false
+ for _, subres := range res.PresenceResponse {
+ if !subres.GetValid() {
+ anyErr = true
+ addResult("", ErrInvalidJID)
+ continue
+ }
+ if !*subres.IsAvailable || subres.Presence == nil {
+ anyErr = true
+ addResult("", ErrPresenceUnavailable)
+ continue
+ }
+ presence, ok := presenceMap[*subres.Presence]
+ if ok {
+ addResult(presence, nil)
+ } else {
+ anyErr = true
+ addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence))
+ }
+ }
+ if anyErr {
+ return presences, errs
+ }
+ return presences, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name)
+}
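A hedged sketch of consuming GetPresenceMulti, showing how per-JID failures surface as an appengine.MultiError whose indices line up with the input JIDs. The reportPresence helper, the package name, and the use of the standard log package are assumptions; the xmpp and appengine calls are from this diff.

	package app

	import (
		"log"

		"golang.org/x/net/context"

		"google.golang.org/appengine"
		"google.golang.org/appengine/xmpp"
	)

	func reportPresence(c context.Context, jids []string) {
		states, err := xmpp.GetPresenceMulti(c, jids, "")
		if me, ok := err.(appengine.MultiError); ok {
			// Entries in me are nil for JIDs whose presence was retrieved successfully.
			for i, e := range me {
				if e != nil {
					log.Printf("%s: %v", jids[i], e)
				}
			}
		} else if err != nil {
			log.Printf("BulkGetPresence failed: %v", err)
			return
		}
		for i, s := range states {
			log.Printf("%s is %q", jids[i], s)
		}
	}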
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp_test.go b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
new file mode 100644
index 000000000..c3030d36d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
@@ -0,0 +1,173 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package xmpp
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+func newPresenceResponse(isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) *pb.PresenceResponse {
+ return &pb.PresenceResponse{
+ IsAvailable: proto.Bool(isAvailable),
+ Presence: presence.Enum(),
+ Valid: proto.Bool(valid),
+ }
+}
+
+func setPresenceResponse(m *pb.PresenceResponse, isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) {
+ m.IsAvailable = &isAvailable
+ m.Presence = presence.Enum()
+ m.Valid = &valid
+}
+
+func TestGetPresence(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "GetPresence", func(in *pb.PresenceRequest, out *pb.PresenceResponse) error {
+ if jid := in.GetJid(); jid != "user@example.com" {
+ return fmt.Errorf("bad jid %q", jid)
+ }
+ setPresenceResponse(out, true, pb.PresenceResponse_CHAT, true)
+ return nil
+ })
+
+ presence, err := GetPresence(c, "user@example.com", "")
+ if err != nil {
+ t.Fatalf("GetPresence: %v", err)
+ }
+
+ if presence != "chat" {
+ t.Errorf("GetPresence: got %#v, want %#v", presence, pb.PresenceResponse_CHAT)
+ }
+}
+
+func TestGetPresenceMultiSingleJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ }
+ return nil
+ })
+
+ presence, err := GetPresenceMulti(c, []string{"user@example.com"}, "")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ if !reflect.DeepEqual(presence, []string{""}) {
+ t.Errorf("GetPresenceMulti: got %s, want %s", presence, []string{""})
+ }
+}
+
+func TestGetPresenceMultiJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ newPresenceResponse(true, pb.PresenceResponse_AWAY, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ want := []string{"", "away"}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiFromJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ if jid := in.GetFromJid(); jid != "bot@appspot.com" {
+ return fmt.Errorf("bad from jid %q", jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ newPresenceResponse(true, pb.PresenceResponse_CHAT, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "bot@appspot.com")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ want := []string{"", "chat"}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiInvalid(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_EXTENDED_AWAY, true),
+ newPresenceResponse(true, pb.PresenceResponse_CHAT, false),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+
+ wantErr := appengine.MultiError{nil, ErrInvalidJID}
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+ }
+
+ want := []string{"xa", ""}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiUnavailable(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(false, pb.PresenceResponse_AWAY, true),
+ newPresenceResponse(false, pb.PresenceResponse_DO_NOT_DISTURB, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+
+ wantErr := appengine.MultiError{
+ ErrPresenceUnavailable,
+ ErrPresenceUnavailable,
+ }
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+ }
+ want := []string{"", ""}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+ }
+}