From 6d8f122a5160f6d9e4c51579f2429dfaa62c7271 Mon Sep 17 00:00:00 2001 From: Christopher Speller Date: Fri, 16 Feb 2018 06:47:51 -0800 Subject: Upgrading server dependancies (#8308) --- vendor/gopkg.in/olivere/elastic.v5/.travis.yml | 2 +- vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS | 11 +- vendor/gopkg.in/olivere/elastic.v5/README.md | 16 +- .../gopkg.in/olivere/elastic.v5/bulk_processor.go | 61 +- vendor/gopkg.in/olivere/elastic.v5/client.go | 8 +- vendor/gopkg.in/olivere/elastic.v5/errors.go | 8 + vendor/gopkg.in/olivere/elastic.v5/msearch.go | 39 +- vendor/gopkg.in/olivere/elastic.v5/msearch_test.go | 105 ++ .../elastic.v5/recipes/bulk_processor/main.go | 149 ++ vendor/gopkg.in/olivere/elastic.v5/reindex.go | 10 + vendor/gopkg.in/olivere/elastic.v5/run-es.sh | 4 +- vendor/gopkg.in/olivere/elastic.v5/search_aggs.go | 70 + .../elastic.v5/search_aggs_bucket_composite.go | 498 +++++++ .../search_aggs_bucket_composite_test.go | 92 ++ .../elastic.v5/search_aggs_bucket_date_range.go | 9 + .../search_aggs_bucket_date_range_test.go | 4 +- .../olivere/elastic.v5/search_aggs_test.go | 441 ++++-- .../olivere/elastic.v5/search_queries_terms_set.go | 96 ++ .../elastic.v5/search_queries_terms_set_test.go | 75 + .../gopkg.in/olivere/elastic.v5/search_request.go | 49 +- vendor/gopkg.in/olivere/elastic.v5/search_test.go | 55 + .../gopkg.in/square/go-jose.v1/.gitcookies.sh.enc | 1 - vendor/gopkg.in/square/go-jose.v1/.gitignore | 7 - vendor/gopkg.in/square/go-jose.v1/.travis.yml | 45 - vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md | 10 - vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md | 14 - vendor/gopkg.in/square/go-jose.v1/LICENSE | 202 --- vendor/gopkg.in/square/go-jose.v1/README.md | 212 --- vendor/gopkg.in/square/go-jose.v1/asymmetric.go | 520 ------- .../gopkg.in/square/go-jose.v1/asymmetric_test.go | 468 ------- .../gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go | 196 --- .../square/go-jose.v1/cipher/cbc_hmac_test.go | 498 ------- 
.../square/go-jose.v1/cipher/concat_kdf.go | 75 - .../square/go-jose.v1/cipher/concat_kdf_test.go | 150 -- .../gopkg.in/square/go-jose.v1/cipher/ecdh_es.go | 62 - .../square/go-jose.v1/cipher/ecdh_es_test.go | 115 -- .../gopkg.in/square/go-jose.v1/cipher/key_wrap.go | 109 -- .../square/go-jose.v1/cipher/key_wrap_test.go | 133 -- vendor/gopkg.in/square/go-jose.v1/crypter.go | 416 ------ vendor/gopkg.in/square/go-jose.v1/crypter_test.go | 785 ----------- vendor/gopkg.in/square/go-jose.v1/doc.go | 26 - vendor/gopkg.in/square/go-jose.v1/doc_test.go | 226 --- vendor/gopkg.in/square/go-jose.v1/encoding.go | 193 --- vendor/gopkg.in/square/go-jose.v1/encoding_test.go | 173 --- .../gopkg.in/square/go-jose.v1/jose-util/README.md | 59 - .../square/go-jose.v1/jose-util/jose-util.t | 94 -- .../gopkg.in/square/go-jose.v1/jose-util/main.go | 189 --- vendor/gopkg.in/square/go-jose.v1/json/LICENSE | 27 - vendor/gopkg.in/square/go-jose.v1/json/README.md | 13 - .../gopkg.in/square/go-jose.v1/json/bench_test.go | 223 --- vendor/gopkg.in/square/go-jose.v1/json/decode.go | 1183 ---------------- .../gopkg.in/square/go-jose.v1/json/decode_test.go | 1474 -------------------- vendor/gopkg.in/square/go-jose.v1/json/encode.go | 1197 ---------------- .../gopkg.in/square/go-jose.v1/json/encode_test.go | 538 ------- vendor/gopkg.in/square/go-jose.v1/json/indent.go | 141 -- .../gopkg.in/square/go-jose.v1/json/number_test.go | 133 -- vendor/gopkg.in/square/go-jose.v1/json/scanner.go | 623 --------- .../square/go-jose.v1/json/scanner_test.go | 316 ----- vendor/gopkg.in/square/go-jose.v1/json/stream.go | 480 ------- .../gopkg.in/square/go-jose.v1/json/stream_test.go | 354 ----- .../gopkg.in/square/go-jose.v1/json/tagkey_test.go | 115 -- vendor/gopkg.in/square/go-jose.v1/json/tags.go | 44 - .../gopkg.in/square/go-jose.v1/json/tags_test.go | 28 - .../square/go-jose.v1/json/testdata/code.json.gz | Bin 120432 -> 0 bytes .../gopkg.in/square/go-jose.v1/json_fork_test.go | 116 -- 
vendor/gopkg.in/square/go-jose.v1/jwe.go | 280 ---- vendor/gopkg.in/square/go-jose.v1/jwe_test.go | 537 ------- vendor/gopkg.in/square/go-jose.v1/jwk.go | 457 ------ vendor/gopkg.in/square/go-jose.v1/jwk_test.go | 662 --------- vendor/gopkg.in/square/go-jose.v1/jws.go | 272 ---- vendor/gopkg.in/square/go-jose.v1/jws_test.go | 312 ----- vendor/gopkg.in/square/go-jose.v1/shared.go | 224 --- vendor/gopkg.in/square/go-jose.v1/signing.go | 258 ---- vendor/gopkg.in/square/go-jose.v1/signing_test.go | 451 ------ vendor/gopkg.in/square/go-jose.v1/symmetric.go | 349 ----- .../gopkg.in/square/go-jose.v1/symmetric_test.go | 131 -- vendor/gopkg.in/square/go-jose.v1/utils.go | 74 - vendor/gopkg.in/square/go-jose.v1/utils_test.go | 225 --- 78 files changed, 1621 insertions(+), 16396 deletions(-) create mode 100644 vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_processor/main.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite_test.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set.go create mode 100644 vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc delete mode 100644 vendor/gopkg.in/square/go-jose.v1/.gitignore delete mode 100644 vendor/gopkg.in/square/go-jose.v1/.travis.yml delete mode 100644 vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v1/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/asymmetric.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go delete mode 100644 
vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/crypter.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/crypter_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/doc.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/doc_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/encoding.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/encoding_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jose-util/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jose-util/main.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/bench_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/decode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/decode_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/encode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/encode_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/indent.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/number_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/scanner.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/stream.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/stream_test.go delete mode 100644 
vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/tags.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/tags_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json_fork_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jwe.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jwe_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jwk.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jwk_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jws.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jws_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/shared.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/signing.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/signing_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/symmetric.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/symmetric_test.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/utils.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/utils_test.go (limited to 'vendor/gopkg.in') diff --git a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml index b4322c13c..9658f873a 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml +++ b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml @@ -12,4 +12,4 @@ services: - docker before_install: - sudo sysctl -w vm.max_map_count=262144 - - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:6.1.2 elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ + - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e 
"ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.1 elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS index d7f7f780f..ba06dac29 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS +++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS @@ -68,9 +68,11 @@ Joe Buck [@four2five](https://github.com/four2five) John Barker [@j16r](https://github.com/j16r) John Goodall [@jgoodall](https://github.com/jgoodall) John Stanford [@jxstanford](https://github.com/jxstanford) +Jonas Groenaas Drange [@semafor](https://github.com/semafor) Josh Chorlton [@jchorl](https://github.com/jchorl) jun [@coseyo](https://github.com/coseyo) Junpei Tsuji [@jun06t](https://github.com/jun06t) +kartlee [@kartlee](https://github.com/kartlee) Keith Hatton [@khatton-ft](https://github.com/khatton-ft) kel [@liketic](https://github.com/liketic) Kenta SUZUKI [@suzuken](https://github.com/suzuken) @@ -98,10 +100,13 @@ Orne Brocaar [@brocaar](https://github.com/brocaar) Paul [@eyeamera](https://github.com/eyeamera) Pete C [@peteclark-ft](https://github.com/peteclark-ft) Radoslaw Wesolowski [r--w](https://github.com/r--w) +Roman Colohanin [@zuzmic](https://github.com/zuzmic) Ryan Schmukler [@rschmukler](https://github.com/rschmukler) +Ryan Wynn [@rwynn](https://github.com/rwynn) Sacheendra talluri [@sacheendra](https://github.com/sacheendra) Sean DuBois [@Sean-Der](https://github.com/Sean-Der) Shalin LK [@shalinlk](https://github.com/shalinlk) +singham [@zhaochenxiao90](https://github.com/zhaochenxiao90) Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic) Stuart Warren [@Woz](https://github.com/stuart-warren) Sulaiman [@salajlan](https://github.com/salajlan) @@ -111,13 +116,13 @@ Take [ww24](https://github.com/ww24) Tetsuya Morimoto [@t2y](https://github.com/t2y) TimeEmit [@TimeEmit](https://github.com/timeemit) 
TusharM [@tusharm](https://github.com/tusharm) -zhangxin [@visaxin](https://github.com/visaxin) wangtuo [@wangtuo](https://github.com/wangtuo) Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri) wolfkdy [@wolfkdy](https://github.com/wolfkdy) Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb) Yarden Bar [@ayashjorden](https://github.com/ayashjorden) zakthomas [@zakthomas](https://github.com/zakthomas) -singham [@zhaochenxiao90](https://github.com/zhaochenxiao90) +Yuya Kusakabe [@higebu](https://github.com/higebu) +Zach [@snowzach](https://github.com/snowzach) +zhangxin [@visaxin](https://github.com/visaxin) @林 [@zplzpl](https://github.com/zplzpl) -Roman Colohanin [@zuzmic](https://github.com/zuzmic) diff --git a/vendor/gopkg.in/olivere/elastic.v5/README.md b/vendor/gopkg.in/olivere/elastic.v5/README.md index f452b664d..d0cdd7821 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/README.md +++ b/vendor/gopkg.in/olivere/elastic.v5/README.md @@ -199,6 +199,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Significant Terms - [x] Significant Text - [x] Terms + - [x] Composite - Pipeline Aggregations - [x] Avg Bucket - [x] Derivative @@ -212,6 +213,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Cumulative Sum - [x] Bucket Script - [x] Bucket Selector + - [ ] Bucket Sort - [x] Serial Differencing - [x] Matrix Aggregations - [x] Matrix Stats @@ -234,17 +236,17 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details. - [x] Update Indices Settings - [x] Get Settings - [x] Analyze + - [x] Explain Analyze - [x] Index Templates -- [ ] Shadow Replica Indices - [x] Indices Stats - [x] Indices Segments - [ ] Indices Recovery - [ ] Indices Shard Stores - [ ] Clear Cache - [x] Flush + - [x] Synced Flush - [x] Refresh - [x] Force Merge -- [ ] Upgrade ### cat APIs @@ -267,6 +269,7 @@ The cat APIs are not implemented as of now. 
We think they are better suited for - [ ] cat shards - [ ] cat segments - [ ] cat snapshots +- [ ] cat templates ### Cluster APIs @@ -278,6 +281,8 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] Cluster Update Settings - [x] Nodes Stats - [x] Nodes Info +- [ ] Nodes Feature Usage +- [ ] Remote Cluster Info - [x] Task Management API - [ ] Nodes hot_threads - [ ] Cluster Allocation Explain API @@ -297,6 +302,7 @@ The cat APIs are not implemented as of now. We think they are better suited for - Term level queries - [x] Term Query - [x] Terms Query + - [x] Terms Set Query - [x] Range Query - [x] Exists Query - [x] Prefix Query @@ -311,7 +317,6 @@ The cat APIs are not implemented as of now. We think they are better suited for - [x] Dis Max Query - [x] Function Score Query - [x] Boosting Query - - [x] Indices Query - Joining queries - [x] Nested Query - [x] Has Child Query @@ -321,12 +326,9 @@ The cat APIs are not implemented as of now. We think they are better suited for - [ ] GeoShape Query - [x] Geo Bounding Box Query - [x] Geo Distance Query - - [ ] Geo Distance Range Query - [x] Geo Polygon Query - - [ ] Geohash Cell Query - Specialized queries - [x] More Like This Query - - [x] Template Query - [x] Script Query - [x] Percolate Query - Span queries @@ -346,7 +348,7 @@ The cat APIs are not implemented as of now. 
We think they are better suited for - Snapshot and Restore - [x] Repositories - - [ ] Snapshot + - [x] Snapshot - [ ] Restore - [ ] Snapshot status - [ ] Monitoring snapshot/restore status diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go index b2709a880..6ee8a3dee 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go @@ -6,6 +6,7 @@ package elastic import ( "context" + "net" "sync" "sync/atomic" "time" @@ -121,7 +122,7 @@ func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { return s } -// Set the backoff strategy to use for errors +// Backoff sets the backoff strategy to use for errors. func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService { s.backoff = backoff return s @@ -248,6 +249,8 @@ type BulkProcessor struct { statsMu sync.Mutex // guards the following block stats *BulkProcessorStats + + stopReconnC chan struct{} // channel to signal stop reconnection attempts } func newBulkProcessor( @@ -293,6 +296,7 @@ func (p *BulkProcessor) Start(ctx context.Context) error { p.requestsC = make(chan BulkableRequest) p.executionId = 0 p.stats = newBulkProcessorStats(p.numWorkers) + p.stopReconnC = make(chan struct{}) // Create and start up workers. p.workers = make([]*bulkWorker, p.numWorkers) @@ -331,6 +335,12 @@ func (p *BulkProcessor) Close() error { return nil } + // Tell connection checkers to stop + if p.stopReconnC != nil { + close(p.stopReconnC) + p.stopReconnC = nil + } + // Stop flusher (if enabled) if p.flusherStopC != nil { p.flusherStopC <- struct{}{} @@ -436,29 +446,43 @@ func (w *bulkWorker) work(ctx context.Context) { var stop bool for !stop { + var err error select { case req, open := <-w.p.requestsC: if open { // Received a new request w.service.Add(req) if w.commitRequired() { - w.commit(ctx) // TODO swallow errors here? 
+ err = w.commit(ctx) } } else { // Channel closed: Stop. stop = true if w.service.NumberOfActions() > 0 { - w.commit(ctx) // TODO swallow errors here? + err = w.commit(ctx) } } case <-w.flushC: // Commit outstanding requests if w.service.NumberOfActions() > 0 { - w.commit(ctx) // TODO swallow errors here? + err = w.commit(ctx) } w.flushAckC <- struct{}{} } + if !stop && err != nil { + waitForActive := func() { + // Add back pressure to prevent Add calls from filling up the request queue + ready := make(chan struct{}) + go w.waitForActiveConnection(ready) + <-ready + } + if _, ok := err.(net.Error); ok { + waitForActive() + } else if IsConnErr(err) { + waitForActive() + } + } } } @@ -511,6 +535,35 @@ func (w *bulkWorker) commit(ctx context.Context) error { return err } +func (w *bulkWorker) waitForActiveConnection(ready chan<- struct{}) { + defer close(ready) + + t := time.NewTicker(5 * time.Second) + defer t.Stop() + + client := w.p.c + stopReconnC := w.p.stopReconnC + w.p.c.errorf("elastic: bulk processor %q is waiting for an active connection", w.p.name) + + // loop until a health check finds at least 1 active connection or the reconnection channel is closed + for { + select { + case _, ok := <-stopReconnC: + if !ok { + w.p.c.errorf("elastic: bulk processor %q active connection check interrupted", w.p.name) + return + } + case <-t.C: + client.healthcheck(time.Duration(3)*time.Second, true) + if client.mustActiveConn() == nil { + // found an active connection + // exit and signal done to the WaitGroup + return + } + } + } +} + func (w *bulkWorker) updateStats(res *BulkResponse) { // Update stats if res != nil { diff --git a/vendor/gopkg.in/olivere/elastic.v5/client.go b/vendor/gopkg.in/olivere/elastic.v5/client.go index 1eb0ec54f..165a30526 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/client.go +++ b/vendor/gopkg.in/olivere/elastic.v5/client.go @@ -26,7 +26,7 @@ import ( const ( // Version is the current version of Elastic. 
- Version = "6.1.4" + Version = "6.1.7" // DefaultURL is the default endpoint of Elasticsearch on the local machine. // It is used e.g. when initializing a new Client without a specific URL. @@ -1778,9 +1778,3 @@ func (c *Client) WaitForGreenStatus(timeout string) error { func (c *Client) WaitForYellowStatus(timeout string) error { return c.WaitForStatus("yellow", timeout) } - -// IsConnError unwraps the given error value and checks if it is equal to -// elastic.ErrNoClient. -func IsConnErr(err error) bool { - return errors.Cause(err) == ErrNoClient -} diff --git a/vendor/gopkg.in/olivere/elastic.v5/errors.go b/vendor/gopkg.in/olivere/elastic.v5/errors.go index 00a936621..e40cda845 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/errors.go +++ b/vendor/gopkg.in/olivere/elastic.v5/errors.go @@ -9,6 +9,8 @@ import ( "fmt" "io/ioutil" "net/http" + + "github.com/pkg/errors" ) // checkResponse will return an error if the request/response indicates @@ -89,6 +91,12 @@ func (e *Error) Error() string { } } +// IsConnErr returns true if the error indicates that Elastic could not +// find an Elasticsearch host to connect to. +func IsConnErr(err error) bool { + return err == ErrNoClient || errors.Cause(err) == ErrNoClient +} + // IsNotFound returns true if the given error indicates that Elasticsearch // returned HTTP status 404. The err parameter can be of type *elastic.Error, // elastic.Error, *http.Response or int (indicating the HTTP status code). diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch.go b/vendor/gopkg.in/olivere/elastic.v5/msearch.go index ed54d3c2f..c1a589a97 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/msearch.go +++ b/vendor/gopkg.in/olivere/elastic.v5/msearch.go @@ -14,19 +14,17 @@ import ( // MultiSearch executes one or more searches in one roundtrip. 
type MultiSearchService struct { - client *Client - requests []*SearchRequest - indices []string - pretty bool - routing string - preference string + client *Client + requests []*SearchRequest + indices []string + pretty bool + maxConcurrentRequests *int + preFilterShardSize *int } func NewMultiSearchService(client *Client) *MultiSearchService { builder := &MultiSearchService{ - client: client, - requests: make([]*SearchRequest, 0), - indices: make([]string, 0), + client: client, } return builder } @@ -46,6 +44,16 @@ func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { return s } +func (s *MultiSearchService) MaxConcurrentSearches(max int) *MultiSearchService { + s.maxConcurrentRequests = &max + return s +} + +func (s *MultiSearchService) PreFilterShardSize(size int) *MultiSearchService { + s.preFilterShardSize = &size + return s +} + func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) { // Build url path := "/_msearch" @@ -55,6 +63,12 @@ func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) if s.pretty { params.Set("pretty", fmt.Sprintf("%v", s.pretty)) } + if v := s.maxConcurrentRequests; v != nil { + params.Set("max_concurrent_searches", fmt.Sprintf("%v", *v)) + } + if v := s.preFilterShardSize; v != nil { + params.Set("pre_filter_shard_size", fmt.Sprintf("%v", *v)) + } // Set body var lines []string @@ -68,14 +82,14 @@ func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) if err != nil { return nil, err } - body, err := json.Marshal(sr.Body()) + body, err := sr.Body() if err != nil { return nil, err } lines = append(lines, string(header)) - lines = append(lines, string(body)) + lines = append(lines, body) } - body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + body := strings.Join(lines, "\n") + "\n" // add trailing \n // Get response res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ @@ -96,6 +110,7 @@ func (s 
*MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) return ret, nil } +// MultiSearchResult is the outcome of running a multi-search operation. type MultiSearchResult struct { Responses []*SearchResult `json:"responses,omitempty"` } diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go b/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go index 79f2047e6..d25e2cc28 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go @@ -13,6 +13,7 @@ import ( func TestMultiSearch(t *testing.T) { client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndexAndLog(t) tweet1 := tweet{ User: "olivere", @@ -60,6 +61,110 @@ func TestMultiSearch(t *testing.T) { sreq2 := NewSearchRequest().Index(testIndexName).Type("doc"). Source(NewSearchSource().Query(q2)) + searchResult, err := client.MultiSearch(). + Add(sreq1, sreq2). + Pretty(true). + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 2 { + t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } + + sres = searchResult.Responses[1] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 2 { + t.Errorf("expected 
Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 2 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMultiSearchWithStrings(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + // client := setupTestClientAndCreateIndexAndLog(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2). + Source(`{"query":{"match_all":{}}}`) + sreq2 := NewSearchRequest().Index(testIndexName).Type("doc"). + Source(`{"query":{"term":{"tags":"golang"}}}`) + searchResult, err := client.MultiSearch(). Add(sreq1, sreq2). 
Do(context.TODO()) diff --git a/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_processor/main.go b/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_processor/main.go new file mode 100644 index 000000000..f13243297 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_processor/main.go @@ -0,0 +1,149 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +// BulkProcessor runs a bulk processing job that fills an index +// given certain criteria like flush interval etc. +// +// Example +// +// bulk_processor -url=http://127.0.0.1:9200/bulk-processor-test?sniff=false -n=100000 -flush-interval=1s +// +package main + +import ( + "context" + "flag" + "fmt" + "log" + "math/rand" + "os" + "os/signal" + "sync/atomic" + "syscall" + "time" + + "github.com/google/uuid" + + "github.com/olivere/elastic" + "github.com/olivere/elastic/config" +) + +func main() { + var ( + url = flag.String("url", "http://localhost:9200/bulk-processor-test", "Elasticsearch URL") + numWorkers = flag.Int("num-workers", 4, "Number of workers") + n = flag.Int64("n", -1, "Number of documents to process (-1 for unlimited)") + flushInterval = flag.Duration("flush-interval", 1*time.Second, "Flush interval") + bulkActions = flag.Int("bulk-actions", 0, "Number of bulk actions before committing") + bulkSize = flag.Int("bulk-size", 0, "Size of bulk requests before committing") + ) + flag.Parse() + log.SetFlags(0) + + rand.Seed(time.Now().UnixNano()) + + // Parse configuration from URL + cfg, err := config.Parse(*url) + if err != nil { + log.Fatal(err) + } + + // Create an Elasticsearch client from the parsed config + client, err := elastic.NewClientFromConfig(cfg) + if err != nil { + log.Fatal(err) + } + + // Drop old index + exists, err := client.IndexExists(cfg.Index).Do(context.Background()) + if err != nil { + log.Fatal(err) + } + if exists { + _, err = 
client.DeleteIndex(cfg.Index).Do(context.Background()) + if err != nil { + log.Fatal(err) + } + } + + // Create processor + bulkp := elastic.NewBulkProcessorService(client). + Name("bulk-test-processor"). + Stats(true). + Backoff(elastic.StopBackoff{}). + FlushInterval(*flushInterval). + Workers(*numWorkers) + if *bulkActions > 0 { + bulkp = bulkp.BulkActions(*bulkActions) + } + if *bulkSize > 0 { + bulkp = bulkp.BulkSize(*bulkSize) + } + p, err := bulkp.Do(context.Background()) + if err != nil { + log.Fatal(err) + } + + var created int64 + errc := make(chan error, 1) + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + <-c + errc <- nil + }() + + go func() { + defer func() { + if err := p.Close(); err != nil { + errc <- err + } + }() + + type Doc struct { + Timestamp time.Time `json:"@timestamp"` + } + + for { + current := atomic.AddInt64(&created, 1) + if *n > 0 && current >= *n { + errc <- nil + return + } + r := elastic.NewBulkIndexRequest(). + Index(cfg.Index). + Type("doc"). + Id(uuid.New().String()). 
+ Doc(Doc{Timestamp: time.Now()}) + p.Add(r) + + time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond) + } + }() + + go func() { + t := time.NewTicker(1 * time.Second) + defer t.Stop() + for range t.C { + stats := p.Stats() + written := atomic.LoadInt64(&created) + var queued int64 + for _, w := range stats.Workers { + queued += w.Queued + } + fmt.Printf("Queued=%5d Written=%8d Succeeded=%8d Failed=%8d Comitted=%6d Flushed=%6d\n", + queued, + written, + stats.Succeeded, + stats.Failed, + stats.Committed, + stats.Flushed, + ) + } + }() + + if err := <-errc; err != nil { + log.Fatal(err) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/reindex.go b/vendor/gopkg.in/olivere/elastic.v5/reindex.go index 35440fa80..9cdd50a68 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/reindex.go +++ b/vendor/gopkg.in/olivere/elastic.v5/reindex.go @@ -20,6 +20,7 @@ type ReindexService struct { waitForActiveShards string waitForCompletion *bool requestsPerSecond *int + slices *int body interface{} source *ReindexSource destination *ReindexDestination @@ -51,6 +52,12 @@ func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexServic return s } +// Slices specifies the number of slices this task should be divided into. Defaults to 1. +func (s *ReindexService) Slices(slices int) *ReindexService { + s.slices = &slices + return s +} + // Refresh indicates whether Elasticsearch should refresh the effected indexes // immediately. 
func (s *ReindexService) Refresh(refresh string) *ReindexService { @@ -179,6 +186,9 @@ func (s *ReindexService) buildURL() (string, url.Values, error) { if s.requestsPerSecond != nil { params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) } + if s.slices != nil { + params.Set("slices", fmt.Sprintf("%v", *s.slices)) + } if s.waitForActiveShards != "" { params.Set("wait_for_active_shards", s.waitForActiveShards) } diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es.sh index 1f4a851d4..624a864ed 100755 --- a/vendor/gopkg.in/olivere/elastic.v5/run-es.sh +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es.sh @@ -1,3 +1,3 @@ #!/bin/sh -VERSION=${VERSION:=6.1.2} -docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:$VERSION elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ +VERSION=${VERSION:=6.2.1} +docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:$VERSION elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go index c5082b2b1..6359611b1 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go @@ -653,6 +653,23 @@ func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, return nil, false } +// Composite returns composite bucket aggregation results. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html +// for details. 
+func (a Aggregations) Composite(name string) (*AggregationBucketCompositeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketCompositeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + // -- Single value metric -- // AggregationValueMetric is a single-value metric, returned e.g. by a @@ -1448,3 +1465,56 @@ func (a *AggregationPipelinePercentilesMetric) UnmarshalJSON(data []byte) error a.Aggregations = aggs return nil } + +// -- Composite key items -- + +// AggregationBucketCompositeItems implements the response structure +// for a bucket aggregation of type composite. +type AggregationBucketCompositeItems struct { + Aggregations + + Buckets []*AggregationBucketCompositeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItems structure. +func (a *AggregationBucketCompositeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketCompositeItem is a single bucket of an AggregationBucketCompositeItems structure. +type AggregationBucketCompositeItem struct { + Aggregations + + Key map[string]interface{} //`json:"key"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItem structure. 
+func (a *AggregationBucketCompositeItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite.go new file mode 100644 index 000000000..1d9132d2d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite.go @@ -0,0 +1,498 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CompositeAggregation is a multi-bucket values source based aggregation +// that can be used to calculate unique composite values from source documents. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html +// for details. +type CompositeAggregation struct { + after map[string]interface{} + size *int + sources []CompositeAggregationValuesSource + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewCompositeAggregation creates a new CompositeAggregation. +func NewCompositeAggregation() *CompositeAggregation { + return &CompositeAggregation{ + sources: make([]CompositeAggregationValuesSource, 0), + subAggregations: make(map[string]Aggregation), + } +} + +// Size represents the number of composite buckets to return. +// Defaults to 10 as of Elasticsearch 6.1. 
+func (a *CompositeAggregation) Size(size int) *CompositeAggregation {
+	a.size = &size
+	return a
+}
+
+// AggregateAfter sets the values that indicate which composite bucket this
+// request should "aggregate after".
+func (a *CompositeAggregation) AggregateAfter(after map[string]interface{}) *CompositeAggregation {
+	a.after = after
+	return a
+}
+
+// Sources specifies the list of CompositeAggregationValuesSource instances to
+// use in the aggregation.
+func (a *CompositeAggregation) Sources(sources ...CompositeAggregationValuesSource) *CompositeAggregation {
+	a.sources = append(a.sources, sources...)
+	return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *CompositeAggregation) SubAggregation(name string, subAggregation Aggregation) *CompositeAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *CompositeAggregation) Meta(metaData map[string]interface{}) *CompositeAggregation {
+	a.meta = metaData
+	return a
+}
+
+// Source returns the serializable JSON for this aggregation.
+func (a *CompositeAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "my_composite_agg" : {
+	//             "composite" : {
+	//                 "sources": [
+	//				      {"my_term": { "terms": { "field": "product" }}},
+	//				      {"my_histo": { "histogram": { "field": "price", "interval": 5 }}},
+	//				      {"my_date": { "date_histogram": { "field": "timestamp", "interval": "1d" }}},
+	//                 ],
+	//                 "size" : 10,
+	//                 "after" : ["a", 2, "c"]
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "composite" : { ... } } part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["composite"] = opts + + sources := make([]interface{}, len(a.sources)) + for i, s := range a.sources { + src, err := s.Source() + if err != nil { + return nil, err + } + sources[i] = src + } + opts["sources"] = sources + + if a.size != nil { + opts["size"] = *a.size + } + + if a.after != nil { + opts["after"] = a.after + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Generic interface for CompositeAggregationValues -- + +// CompositeAggregationValuesSource specifies the interface that +// all implementations for CompositeAggregation's Sources method +// need to implement. +// +// The different implementations are described in +// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_values_source_2. +type CompositeAggregationValuesSource interface { + Source() (interface{}, error) +} + +// -- CompositeAggregationTermsValuesSource -- + +// CompositeAggregationTermsValuesSource is a source for the CompositeAggregation that handles terms +// it works very similar to a terms aggregation with slightly different syntax +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_terms +// for details. +type CompositeAggregationTermsValuesSource struct { + name string + field string + script *Script + valueType string + missing interface{} + order string +} + +// NewCompositeAggregationTermsValuesSource creates and initializes +// a new CompositeAggregationTermsValuesSource. 
+func NewCompositeAggregationTermsValuesSource(name string) *CompositeAggregationTermsValuesSource { + return &CompositeAggregationTermsValuesSource{ + name: name, + } +} + +// Field to use for this source. +func (a *CompositeAggregationTermsValuesSource) Field(field string) *CompositeAggregationTermsValuesSource { + a.field = field + return a +} + +// Script to use for this source. +func (a *CompositeAggregationTermsValuesSource) Script(script *Script) *CompositeAggregationTermsValuesSource { + a.script = script + return a +} + +// ValueType specifies the type of values produced by this source, +// e.g. "string" or "date". +func (a *CompositeAggregationTermsValuesSource) ValueType(valueType string) *CompositeAggregationTermsValuesSource { + a.valueType = valueType + return a +} + +// Order specifies the order in the values produced by this source. +// It can be either "asc" or "desc". +func (a *CompositeAggregationTermsValuesSource) Order(order string) *CompositeAggregationTermsValuesSource { + a.order = order + return a +} + +// Asc ensures the order of the values produced is ascending. +func (a *CompositeAggregationTermsValuesSource) Asc() *CompositeAggregationTermsValuesSource { + a.order = "asc" + return a +} + +// Desc ensures the order of the values produced is descending. +func (a *CompositeAggregationTermsValuesSource) Desc() *CompositeAggregationTermsValuesSource { + a.order = "desc" + return a +} + +// Missing specifies the value to use when the source finds a missing +// value in a document. +func (a *CompositeAggregationTermsValuesSource) Missing(missing interface{}) *CompositeAggregationTermsValuesSource { + a.missing = missing + return a +} + +// Source returns the serializable JSON for this values source. 
+func (a *CompositeAggregationTermsValuesSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + name := make(map[string]interface{}) + source[a.name] = name + values := make(map[string]interface{}) + name["terms"] = values + + // field + if a.field != "" { + values["field"] = a.field + } + + // script + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + values["script"] = src + } + + // missing + if a.missing != nil { + values["missing"] = a.missing + } + + // value_type + if a.valueType != "" { + values["value_type"] = a.valueType + } + + // order + if a.order != "" { + values["order"] = a.order + } + + return source, nil + +} + +// -- CompositeAggregationHistogramValuesSource -- + +// CompositeAggregationHistogramValuesSource is a source for the CompositeAggregation that handles histograms +// it works very similar to a terms histogram with slightly different syntax +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_histogram +// for details. +type CompositeAggregationHistogramValuesSource struct { + name string + field string + script *Script + valueType string + missing interface{} + order string + interval float64 +} + +// NewCompositeAggregationHistogramValuesSource creates and initializes +// a new CompositeAggregationHistogramValuesSource. +func NewCompositeAggregationHistogramValuesSource(name string, interval float64) *CompositeAggregationHistogramValuesSource { + return &CompositeAggregationHistogramValuesSource{ + name: name, + interval: interval, + } +} + +// Field to use for this source. +func (a *CompositeAggregationHistogramValuesSource) Field(field string) *CompositeAggregationHistogramValuesSource { + a.field = field + return a +} + +// Script to use for this source. 
+func (a *CompositeAggregationHistogramValuesSource) Script(script *Script) *CompositeAggregationHistogramValuesSource { + a.script = script + return a +} + +// ValueType specifies the type of values produced by this source, +// e.g. "string" or "date". +func (a *CompositeAggregationHistogramValuesSource) ValueType(valueType string) *CompositeAggregationHistogramValuesSource { + a.valueType = valueType + return a +} + +// Missing specifies the value to use when the source finds a missing +// value in a document. +func (a *CompositeAggregationHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationHistogramValuesSource { + a.missing = missing + return a +} + +// Order specifies the order in the values produced by this source. +// It can be either "asc" or "desc". +func (a *CompositeAggregationHistogramValuesSource) Order(order string) *CompositeAggregationHistogramValuesSource { + a.order = order + return a +} + +// Asc ensures the order of the values produced is ascending. +func (a *CompositeAggregationHistogramValuesSource) Asc() *CompositeAggregationHistogramValuesSource { + a.order = "asc" + return a +} + +// Desc ensures the order of the values produced is descending. +func (a *CompositeAggregationHistogramValuesSource) Desc() *CompositeAggregationHistogramValuesSource { + a.order = "desc" + return a +} + +// Interval specifies the interval to use. +func (a *CompositeAggregationHistogramValuesSource) Interval(interval float64) *CompositeAggregationHistogramValuesSource { + a.interval = interval + return a +} + +// Source returns the serializable JSON for this values source. 
+func (a *CompositeAggregationHistogramValuesSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + name := make(map[string]interface{}) + source[a.name] = name + values := make(map[string]interface{}) + name["histogram"] = values + + // field + if a.field != "" { + values["field"] = a.field + } + + // script + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + values["script"] = src + } + + // missing + if a.missing != nil { + values["missing"] = a.missing + } + + // value_type + if a.valueType != "" { + values["value_type"] = a.valueType + } + + // order + if a.order != "" { + values["order"] = a.order + } + + // Histogram-related properties + values["interval"] = a.interval + + return source, nil + +} + +// -- CompositeAggregationDateHistogramValuesSource -- + +// CompositeAggregationDateHistogramValuesSource is a source for the CompositeAggregation that handles date histograms +// it works very similar to a date histogram aggregation with slightly different syntax +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_date_histogram +// for details. +type CompositeAggregationDateHistogramValuesSource struct { + name string + field string + script *Script + valueType string + missing interface{} + order string + interval interface{} + timeZone string +} + +// NewCompositeAggregationDateHistogramValuesSource creates and initializes +// a new CompositeAggregationDateHistogramValuesSource. +func NewCompositeAggregationDateHistogramValuesSource(name string, interval interface{}) *CompositeAggregationDateHistogramValuesSource { + return &CompositeAggregationDateHistogramValuesSource{ + name: name, + interval: interval, + } +} + +// Field to use for this source. 
+func (a *CompositeAggregationDateHistogramValuesSource) Field(field string) *CompositeAggregationDateHistogramValuesSource { + a.field = field + return a +} + +// Script to use for this source. +func (a *CompositeAggregationDateHistogramValuesSource) Script(script *Script) *CompositeAggregationDateHistogramValuesSource { + a.script = script + return a +} + +// ValueType specifies the type of values produced by this source, +// e.g. "string" or "date". +func (a *CompositeAggregationDateHistogramValuesSource) ValueType(valueType string) *CompositeAggregationDateHistogramValuesSource { + a.valueType = valueType + return a +} + +// Missing specifies the value to use when the source finds a missing +// value in a document. +func (a *CompositeAggregationDateHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationDateHistogramValuesSource { + a.missing = missing + return a +} + +// Order specifies the order in the values produced by this source. +// It can be either "asc" or "desc". +func (a *CompositeAggregationDateHistogramValuesSource) Order(order string) *CompositeAggregationDateHistogramValuesSource { + a.order = order + return a +} + +// Asc ensures the order of the values produced is ascending. +func (a *CompositeAggregationDateHistogramValuesSource) Asc() *CompositeAggregationDateHistogramValuesSource { + a.order = "asc" + return a +} + +// Desc ensures the order of the values produced is descending. +func (a *CompositeAggregationDateHistogramValuesSource) Desc() *CompositeAggregationDateHistogramValuesSource { + a.order = "desc" + return a +} + +// Interval to use for the date histogram, e.g. "1d" or a numeric value like "60". +func (a *CompositeAggregationDateHistogramValuesSource) Interval(interval interface{}) *CompositeAggregationDateHistogramValuesSource { + a.interval = interval + return a +} + +// TimeZone to use for the dates. 
+func (a *CompositeAggregationDateHistogramValuesSource) TimeZone(timeZone string) *CompositeAggregationDateHistogramValuesSource { + a.timeZone = timeZone + return a +} + +// Source returns the serializable JSON for this values source. +func (a *CompositeAggregationDateHistogramValuesSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + name := make(map[string]interface{}) + source[a.name] = name + values := make(map[string]interface{}) + name["date_histogram"] = values + + // field + if a.field != "" { + values["field"] = a.field + } + + // script + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + values["script"] = src + } + + // missing + if a.missing != nil { + values["missing"] = a.missing + } + + // value_type + if a.valueType != "" { + values["value_type"] = a.valueType + } + + // order + if a.order != "" { + values["order"] = a.order + } + + // DateHistogram-related properties + values["interval"] = a.interval + + // timeZone + if a.timeZone != "" { + values["time_zone"] = a.timeZone + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite_test.go new file mode 100644 index 000000000..91d84dbdb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_composite_test.go @@ -0,0 +1,92 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCompositeAggregation(t *testing.T) { + agg := NewCompositeAggregation(). 
+ Sources( + NewCompositeAggregationTermsValuesSource("my_terms").Field("a_term").Missing("N/A").Order("asc"), + NewCompositeAggregationHistogramValuesSource("my_histogram", 5).Field("price").Asc(), + NewCompositeAggregationDateHistogramValuesSource("my_date_histogram", "1d").Field("purchase_date").Desc(), + ). + Size(10). + AggregateAfter(map[string]interface{}{ + "my_terms": "1", + "my_histogram": 2, + "my_date_histogram": "3", + }) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"composite":{"after":{"my_date_histogram":"3","my_histogram":2,"my_terms":"1"},"size":10,"sources":[{"my_terms":{"terms":{"field":"a_term","missing":"N/A","order":"asc"}}},{"my_histogram":{"histogram":{"field":"price","interval":5,"order":"asc"}}},{"my_date_histogram":{"date_histogram":{"field":"purchase_date","interval":"1d","order":"desc"}}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompositeAggregationTermsValuesSource(t *testing.T) { + in := NewCompositeAggregationTermsValuesSource("products"). + Script(NewScript("doc['product'].value").Lang("painless")) + src, err := in.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"products":{"terms":{"script":{"lang":"painless","source":"doc['product'].value"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompositeAggregationHistogramValuesSource(t *testing.T) { + in := NewCompositeAggregationHistogramValuesSource("histo", 5). 
+ Field("price") + src, err := in.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histo":{"histogram":{"field":"price","interval":5}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestCompositeAggregationDateHistogramValuesSource(t *testing.T) { + in := NewCompositeAggregationDateHistogramValuesSource("date", "1d"). + Field("timestamp") + src, err := in.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date":{"date_histogram":{"field":"timestamp","interval":"1d"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go index 5407dadb8..714fd3e11 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go @@ -23,6 +23,7 @@ type DateRangeAggregation struct { meta map[string]interface{} keyed *bool unmapped *bool + timeZone string format string entries []DateRangeAggregationEntry } @@ -71,6 +72,11 @@ func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation { return a } +func (a *DateRangeAggregation) TimeZone(timeZone string) *DateRangeAggregation { + a.timeZone = timeZone + return a +} + func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation { a.format = format return a @@ -178,6 +184,9 @@ func (a *DateRangeAggregation) Source() (interface{}, error) { if a.unmapped != nil { opts["unmapped"] = *a.unmapped } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } if a.format != "" { opts["format"] = a.format } diff --git 
a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go index d1c909f3e..89ed495f3 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range_test.go @@ -10,7 +10,7 @@ import ( ) func TestDateRangeAggregation(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at") + agg := NewDateRangeAggregation().Field("created_at").TimeZone("UTC") agg = agg.AddRange(nil, "2012-12-31") agg = agg.AddRange("2013-01-01", "2013-12-31") agg = agg.AddRange("2014-01-01", nil) @@ -23,7 +23,7 @@ func TestDateRangeAggregation(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}],"time_zone":"UTC"}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go index 9d6fa8d27..f1b6347b3 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go @@ -13,13 +13,15 @@ import ( ) func TestAggs(t *testing.T) { - // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) client := setupTestClientAndCreateIndex(t) - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } + /* + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + */ tweet1 := tweet{ User: 
"olivere", @@ -48,7 +50,7 @@ func TestAggs(t *testing.T) { } // Add all documents - _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) + _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) if err != nil { t.Fatal(err) } @@ -102,6 +104,11 @@ func TestAggs(t *testing.T) { topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg) geoBoundsAgg := NewGeoBoundsAggregation().Field("location") geoHashAgg := NewGeoHashGridAggregation().Field("location").Precision(5) + composite := NewCompositeAggregation().Sources( + NewCompositeAggregationTermsValuesSource("composite_users").Field("user"), + NewCompositeAggregationHistogramValuesSource("composite_retweets", 1).Field("retweets"), + NewCompositeAggregationDateHistogramValuesSource("composite_created", "1m").Field("created"), + ) // Run query builder := client.Search().Index(testIndexName).Query(all).Pretty(true) @@ -109,9 +116,7 @@ func TestAggs(t *testing.T) { builder = builder.Aggregation("users", usersAgg) builder = builder.Aggregation("retweets", retweetsAgg) builder = builder.Aggregation("avgRetweets", avgRetweetsAgg) - if esversion >= "2.0" { - builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) - } + builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) builder = builder.Aggregation("minRetweets", minRetweetsAgg) builder = builder.Aggregation("maxRetweets", maxRetweetsAgg) builder = builder.Aggregation("sumRetweets", sumRetweetsAgg) @@ -134,44 +139,41 @@ func TestAggs(t *testing.T) { builder = builder.Aggregation("top-tags", topTagsAgg) builder = builder.Aggregation("viewport", geoBoundsAgg) builder = builder.Aggregation("geohashed", geoHashAgg) - if esversion >= "1.4" { - // Unnamed filters - countByUserAgg := NewFiltersAggregation(). 
- Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) - builder = builder.Aggregation("countByUser", countByUserAgg) - // Named filters - countByUserAgg2 := NewFiltersAggregation(). - FilterWithName("olivere", NewTermQuery("user", "olivere")). - FilterWithName("sandrae", NewTermQuery("user", "sandrae")) - builder = builder.Aggregation("countByUser2", countByUserAgg2) - } - if esversion >= "2.0" { - // AvgBucket - dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("avgBucketDateHisto", dateHisto) - builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) - // MinBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("minBucketDateHisto", dateHisto) - builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) - // MaxBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("maxBucketDateHisto", dateHisto) - builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) - // SumBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("sumBucketDateHisto", dateHisto) - builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) - // MovAvg - dateHisto = 
NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) - builder = builder.Aggregation("movingAvgDateHisto", dateHisto) - } + // Unnamed filters + countByUserAgg := NewFiltersAggregation(). + Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser", countByUserAgg) + // Named filters + countByUserAgg2 := NewFiltersAggregation(). + FilterWithName("olivere", NewTermQuery("user", "olivere")). + FilterWithName("sandrae", NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser2", countByUserAgg2) + // AvgBucket + dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("avgBucketDateHisto", dateHisto) + builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) + // MinBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("minBucketDateHisto", dateHisto) + builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) + // MaxBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("maxBucketDateHisto", dateHisto) + builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) + // SumBucket + dateHisto = 
NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("sumBucketDateHisto", dateHisto) + builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) + // MovAvg + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) + builder = builder.Aggregation("movingAvgDateHisto", dateHisto) + builder = builder.Aggregation("composite", composite) searchResult, err := builder.Do(context.TODO()) if err != nil { t.Fatal(err) @@ -308,26 +310,24 @@ func TestAggs(t *testing.T) { } // avgRetweetsWithMeta - if esversion >= "2.0" { - avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if avgMetaAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if avgMetaAggRes.Meta == nil { - t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) - } - metaDataValue, found := avgMetaAggRes.Meta["meta"] - if !found { - t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) - } - if flag, ok := metaDataValue.(bool); !ok { - t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) - } else if flag != true { - t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) - } + avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgMetaAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgMetaAggRes.Meta == nil { + t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) + } + metaDataValue, found := avgMetaAggRes.Meta["meta"] + if !found { + 
t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) + } + if flag, ok := metaDataValue.(bool); !ok { + t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) + } else if flag != true { + t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) } // minRetweets @@ -817,13 +817,11 @@ func TestAggs(t *testing.T) { if topTags == nil { t.Fatalf("expected != nil; got: nil") } - if esversion >= "1.4.0" { - if topTags.DocCountErrorUpperBound != 0 { - t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) - } - if topTags.SumOfOtherDocCount != 1 { - t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) - } + if topTags.DocCountErrorUpperBound != 0 { + t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) + } + if topTags.SumOfOtherDocCount != 1 { + t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) } if len(topTags.Buckets) != 3 { t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets)) @@ -924,62 +922,71 @@ func TestAggs(t *testing.T) { t.Fatalf("expected != nil; got: nil") } - if esversion >= "1.4" { - // Filters agg "countByUser" (unnamed) - countByUserAggRes, found := agg.Filters("countByUser") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if countByUserAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(countByUserAggRes.Buckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) - } - if len(countByUserAggRes.NamedBuckets) != 0 { - t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets)) - } - if countByUserAggRes.Buckets[0].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) - } - if countByUserAggRes.Buckets[1].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) - } + // Filters agg "countByUser" (unnamed) + countByUserAggRes, found := agg.Filters("countByUser") + if !found { + 
t.Errorf("expected %v; got: %v", true, found) + } + if countByUserAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUserAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) + } + if len(countByUserAggRes.NamedBuckets) != 0 { + t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets)) + } + if countByUserAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) + } + if countByUserAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) + } - // Filters agg "countByUser2" (named) - countByUser2AggRes, found := agg.Filters("countByUser2") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if countByUser2AggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(countByUser2AggRes.Buckets) != 0 { - t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets)) - } - if len(countByUser2AggRes.NamedBuckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets)) - } - b, found := countByUser2AggRes.NamedBuckets["olivere"] - if !found { - t.Fatalf("expected bucket %q; got: %v", "olivere", found) - } - if b == nil { - t.Fatalf("expected bucket %q; got: %v", "olivere", b) - } - if b.DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, b.DocCount) - } - b, found = countByUser2AggRes.NamedBuckets["sandrae"] - if !found { - t.Fatalf("expected bucket %q; got: %v", "sandrae", found) - } - if b == nil { - t.Fatalf("expected bucket %q; got: %v", "sandrae", b) - } - if b.DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, b.DocCount) - } + // Filters agg "countByUser2" (named) + countByUser2AggRes, found := agg.Filters("countByUser2") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if countByUser2AggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUser2AggRes.Buckets) != 0 { + 
t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets)) + } + if len(countByUser2AggRes.NamedBuckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets)) + } + b, found := countByUser2AggRes.NamedBuckets["olivere"] + if !found { + t.Fatalf("expected bucket %q; got: %v", "olivere", found) + } + if b == nil { + t.Fatalf("expected bucket %q; got: %v", "olivere", b) + } + if b.DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, b.DocCount) + } + b, found = countByUser2AggRes.NamedBuckets["sandrae"] + if !found { + t.Fatalf("expected bucket %q; got: %v", "sandrae", found) + } + if b == nil { + t.Fatalf("expected bucket %q; got: %v", "sandrae", b) + } + if b.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, b.DocCount) + } + + compositeAggRes, found := agg.Composite("composite") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if compositeAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if want, have := 3, len(compositeAggRes.Buckets); want != have { + t.Fatalf("expected %d; got: %d", want, have) } } @@ -3231,3 +3238,179 @@ func TestAggsPipelineSerialDiff(t *testing.T) { t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) } } + +func TestAggsComposite(t *testing.T) { + s := `{ + "the_composite" : { + "buckets" : [ + { + "key" : { + "composite_users" : "olivere", + "composite_retweets" : 0.0, + "composite_created" : 1349856720000 + }, + "doc_count" : 1 + }, + { + "key" : { + "composite_users" : "olivere", + "composite_retweets" : 108.0, + "composite_created" : 1355333880000 + }, + "doc_count" : 1 + }, + { + "key" : { + "composite_users" : "sandrae", + "composite_retweets" : 12.0, + "composite_created" : 1321009080000 + }, + "doc_count" : 1 + } + ] + } + }` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Composite("the_composite") + if !found { 
+ t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if want, have := 3, len(agg.Buckets); want != have { + t.Fatalf("expected aggregation buckets length = %v; got: %v", want, have) + } + + // 1st bucket + bucket := agg.Buckets[0] + if want, have := int64(1), bucket.DocCount; want != have { + t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have) + } + if want, have := 3, len(bucket.Key); want != have { + t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have) + } + v, found := bucket.Key["composite_users"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_users") + } + s, ok := v.(string) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := "olivere", s; want != have { + t.Fatalf("expected to find bucket key value %q; got: %q", want, have) + } + v, found = bucket.Key["composite_retweets"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_retweets") + } + f, ok := v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 0.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + v, found = bucket.Key["composite_created"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_created") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 1349856720000.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + + // 2nd bucket + bucket = agg.Buckets[1] + if want, have := int64(1), bucket.DocCount; want != have { + t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have) + } + if want, have := 3, len(bucket.Key); want != have { + t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, 
have) + } + v, found = bucket.Key["composite_users"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_users") + } + s, ok = v.(string) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := "olivere", s; want != have { + t.Fatalf("expected to find bucket key value %q; got: %q", want, have) + } + v, found = bucket.Key["composite_retweets"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_retweets") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 108.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + v, found = bucket.Key["composite_created"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_created") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 1355333880000.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + + // 3rd bucket + bucket = agg.Buckets[2] + if want, have := int64(1), bucket.DocCount; want != have { + t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have) + } + if want, have := 3, len(bucket.Key); want != have { + t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have) + } + v, found = bucket.Key["composite_users"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_users") + } + s, ok = v.(string) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := "sandrae", s; want != have { + t.Fatalf("expected to find bucket key value %q; got: %q", want, have) + } + v, found = bucket.Key["composite_retweets"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_retweets") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + 
} + if want, have := 12.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } + v, found = bucket.Key["composite_created"] + if !found { + t.Fatalf("expected to find bucket key %q", "composite_created") + } + f, ok = v.(float64) + if !ok { + t.Fatalf("expected to have bucket key of type string; got: %T", v) + } + if want, have := 1321009080000.0, f; want != have { + t.Fatalf("expected to find bucket key value %v; got: %v", want, have) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set.go new file mode 100644 index 000000000..be410a1a7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set.go @@ -0,0 +1,96 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsSetQuery returns any documents that match with at least +// one or more of the provided terms. The terms are not analyzed +// and thus must match exactly. The number of terms that must +// match varies per document and is either controlled by a +// minimum should match field or computed per document in a +// minimum should match script. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/query-dsl-terms-set-query.html +type TermsSetQuery struct { + name string + values []interface{} + minimumShouldMatchField string + minimumShouldMatchScript *Script + queryName string + boost *float64 +} + +// NewTermsSetQuery creates and initializes a new TermsSetQuery. +func NewTermsSetQuery(name string, values ...interface{}) *TermsSetQuery { + q := &TermsSetQuery{ + name: name, + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +// MinimumShouldMatchField specifies the field to match. 
+func (q *TermsSetQuery) MinimumShouldMatchField(minimumShouldMatchField string) *TermsSetQuery { + q.minimumShouldMatchField = minimumShouldMatchField + return q +} + +// MinimumShouldMatchScript specifies the script to match. +func (q *TermsSetQuery) MinimumShouldMatchScript(minimumShouldMatchScript *Script) *TermsSetQuery { + q.minimumShouldMatchScript = minimumShouldMatchScript + return q +} + +// Boost sets the boost for this query. +func (q *TermsSetQuery) Boost(boost float64) *TermsSetQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermsSetQuery) QueryName(queryName string) *TermsSetQuery { + q.queryName = queryName + return q +} + +// Source creates the query source for the term query. +func (q *TermsSetQuery) Source() (interface{}, error) { + // {"terms_set":{"codes":{"terms":["abc","def"],"minimum_should_match_field":"required_matches"}}} + source := make(map[string]interface{}) + inner := make(map[string]interface{}) + params := make(map[string]interface{}) + inner[q.name] = params + source["terms_set"] = inner + + // terms + params["terms"] = q.values + + // minimum_should_match_field + if match := q.minimumShouldMatchField; match != "" { + params["minimum_should_match_field"] = match + } + + // minimum_should_match_script + if match := q.minimumShouldMatchScript; match != nil { + src, err := match.Source() + if err != nil { + return nil, err + } + params["minimum_should_match_script"] = src + } + + // Common parameters for all queries + if q.boost != nil { + params["boost"] = *q.boost + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set_test.go new file mode 100644 index 000000000..e13fbfb2f --- /dev/null +++ 
b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms_set_test.go @@ -0,0 +1,75 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "context" + "encoding/json" + "testing" +) + +func TestTermsSetQueryWithField(t *testing.T) { + q := NewTermsSetQuery("codes", "abc", "def", "ghi").MinimumShouldMatchField("required_matches") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms_set":{"codes":{"minimum_should_match_field":"required_matches","terms":["abc","def","ghi"]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsSetQueryWithScript(t *testing.T) { + q := NewTermsSetQuery("codes", "abc", "def", "ghi"). + MinimumShouldMatchScript( + NewScript(`Math.min(params.num_terms, doc['required_matches'].value)`), + ) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms_set":{"codes":{"minimum_should_match_script":{"source":"Math.min(params.num_terms, doc['required_matches'].value)"},"terms":["abc","def","ghi"]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchTermsSetQuery(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // Match all should return all documents + searchResult, err := client.Search(). + Index(testIndexName). + Query( + NewTermsSetQuery("user", "olivere", "sandrae"). + MinimumShouldMatchField("retweets"), + ). + Pretty(true). 
+ Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if got, want := searchResult.Hits.TotalHits, int64(3); got != want { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) + } + if got, want := len(searchResult.Hits.Hits), 3; got != want { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_request.go b/vendor/gopkg.in/olivere/elastic.v5/search_request.go index 6f40ff028..7ee4ce82c 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_request.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_request.go @@ -4,7 +4,10 @@ package elastic -import "strings" +import ( + "encoding/json" + "strings" +) // SearchRequest combines a search request and its // query details (see SearchSource). @@ -130,17 +133,7 @@ func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest } func (r *SearchRequest) Source(source interface{}) *SearchRequest { - switch v := source.(type) { - case *SearchSource: - src, err := v.Source() - if err != nil { - // Do not do anything in case of an error - return r - } - r.source = src - default: - r.source = source - } + r.source = source return r } @@ -200,6 +193,34 @@ func (r *SearchRequest) header() interface{} { // Body is used e.g. by MultiSearch to get information about the search body // of one SearchRequest. 
// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html -func (r *SearchRequest) Body() interface{} { - return r.source +func (r *SearchRequest) Body() (string, error) { + switch t := r.source.(type) { + default: + body, err := json.Marshal(r.source) + if err != nil { + return "", err + } + return string(body), nil + case *SearchSource: + src, err := t.Source() + if err != nil { + return "", err + } + body, err := json.Marshal(src) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + if t != nil { + return *t, nil + } + return "{}", nil + } } diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_test.go index 097c26525..586089aaa 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_test.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_test.go @@ -607,6 +607,61 @@ func TestSearchSource(t *testing.T) { } } +func TestSearchSourceWithString(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = 
client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + + searchResult, err := client.Search(). + Index(testIndexName). + Source(`{"query":{"match_all":{}}}`). // sets the JSON request + Do(context.TODO()) + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } +} + func TestSearchRawString(t *testing.T) { // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) client := setupTestClientAndCreateIndex(t) diff --git a/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc deleted file mode 100644 index 730e569b0..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|&{tU|gG(Cy=+c:u:/p#~["4!nADK merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci - diff --git a/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md deleted file mode 100644 index 97e61dbb6..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . 
- diff --git a/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md deleted file mode 100644 index 61b183651..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/gopkg.in/square/go-jose.v1/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/gopkg.in/square/go-jose.v1/README.md deleted file mode 100644 index 60293ffa2..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/README.md +++ /dev/null @@ -1,212 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) -[![release](https://img.shields.io/github/release/square/go-jose.svg?style=flat)](https://github.com/square/go-jose/releases) -[![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) -[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on encryption -and signing based on the JSON Web Encryption and JSON Web Signature standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. 
In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) -standard (RFC 7516) and -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) -standard (RFC 7515). Tables of supported algorithms are shown below. -The library supports both the compact and full serialization formats, and has -optional support for multiple recipients. It also comes with a small -command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. If you do not like this behavior, you can use the -`std_json` build tag to disable it (though we do not recommend doing so). - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: - - import "gopkg.in/square/go-jose.v1" - -The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain -backwards compatible. We're currently sketching out ideas for a new version, to -clean up the interface a bit. If you have ideas or feature requests [please let -us know](https://github.com/square/go-jose/issues/64)! - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the -[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. 
The -[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) -has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Note that if you are creating a new encrypter or signer with a -JsonWebKey, the key id of the JsonWebKey (if present) will be added to any -resulting messages. 
- - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - -## Examples - -Encryption/decryption example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would -// indicate that the selected algorithm(s) are not currently supported. -publicKey := &privateKey.PublicKey -encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) -if err != nil { - panic(err) -} - -// Encrypt a sample plaintext. Calling the encrypter returns an encrypted -// JWE object, which can then be serialized for output afterwards. An error -// would indicate a problem in an underlying cryptographic primitive. -var plaintext = []byte("Lorem ipsum dolor sit amet") -object, err := encrypter.Encrypt(plaintext) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, encrypted JWE object. An error would indicate that -// the given input did not represent a valid message. 
-object, err = ParseEncrypted(serialized) -if err != nil { - panic(err) -} - -// Now we can decrypt and get back our original plaintext. An error here -// would indicate the the message failed to decrypt, e.g. because the auth -// tag was broken or the message was tampered with. -decrypted, err := object.Decrypt(privateKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(decrypted)) -// output: Lorem ipsum dolor sit amet -``` - -Signing/verification example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. -signer, err := NewSigner(PS512, privateKey) -if err != nil { - panic(err) -} - -// Sign a sample payload. Calling the signer returns a protected JWS object, -// which can then be serialized for output afterwards. An error would -// indicate a problem in an underlying cryptographic primitive. -var payload = []byte("Lorem ipsum dolor sit amet") -object, err := signer.Sign(payload) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, protected JWS object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseSigned(serialized) -if err != nil { - panic(err) -} - -// Now we can verify the signature on the payload. An error here would -// indicate the the message failed to verify, e.g. because the signature was -// broken or the message was tampered with. 
-output, err := object.Verify(&privateKey.PublicKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(output)) -// output: Lorem ipsum dolor sit amet -``` - -More examples can be found in the [Godoc -reference](https://godoc.org/github.com/square/go-jose) for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) -subdirectory also contains a small command-line utility which might -be useful as an example. diff --git a/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go deleted file mode 100644 index cd36c21da..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/asymmetric.go +++ /dev/null @@ -1,520 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "gopkg.in/square/go-jose.v1/cipher" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. 
-func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. 
-func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. -func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. 
- // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. - _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, 
hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - headers := rawHeader{ - Epk: &JsonWebKey{ - Key: &priv.PublicKey, - }, - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - if headers.Epk == nil { - return nil, errors.New("square/go-jose: missing epk header") - } - - publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("square/go-jose: invalid public key in epk header") - } - - apuData := headers.Apu.bytes() - apvData := headers.Apv.bytes() - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) - } - - var keySize int - - switch KeyAlgorithm(headers.Alg) { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. - return deriveKey(string(headers.Enc), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(headers.Alg, keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = 
hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("square/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go deleted file mode 100644 index 018ad2e2d..000000000 --- 
a/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go +++ /dev/null @@ -1,468 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "errors" - "io" - "math/big" - "testing" -) - -func TestVectorsRSA(t *testing.T) { - // Sources: - // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm - // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: fromHexInt(` - a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8 - ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c - bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd - bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93 - ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`), - E: 65537, - }, - D: fromHexInt(` - 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195 - 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d - 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6 - 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb - 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`), - Primes: []*big.Int{ - fromHexInt(` - d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262 - 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c - 2f26a471dcad212eac7ca39d`), - fromHexInt(` - cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3 - 
3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af - 72bfe9a030e860b0288b5d77`), - }, - } - - input := fromHexBytes( - "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34") - - expectedPKCS := fromHexBytes(` - 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808 - 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d - 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb - 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b - cd23b528ceab3c31`) - - expectedOAEP := fromHexBytes(` - 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4 - 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618 - c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6 - 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5 - 210035d47ac72e8a`) - - // Mock random reader - randReader = bytes.NewReader(fromHexBytes(` - 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3 - a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5 - 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98 - 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`)) - defer resetRandReader() - - // RSA-PKCS1v1.5 encrypt - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - encryptedPKCS, err := enc.encrypt(input, RSA1_5) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedPKCS, expectedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP encrypt - encryptedOAEP, err := enc.encrypt(input, RSA_OAEP) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 { - t.Error("Output does not match expected value (OAEP)") - } - - // Need fake cipher for PKCS1v1.5 decrypt - resetRandReader() - aes := newAESGCM(len(input)) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - // RSA-PKCS1v1.5 decrypt - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - 
decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen) - if err != nil { - t.Error("Decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP decrypt - decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen) - if err != nil { - t.Error("decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedOAEP) != 0 { - t.Error("output does not match expected value (OAEP)") - } -} - -func TestInvalidAlgorithmsRSA(t *testing.T) { - _, err := newRSARecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newRSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &rsaTestKey.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - err = enc.verifyPayload([]byte{}, []byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16}) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = dec.signPayload([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -type failingKeyGenerator struct{} - -func (ctx failingKeyGenerator) keySize() int { - return 0 -} - -func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) { - return nil, rawHeader{}, errors.New("failed to generate key") -} - -func TestPKCSKeyGeneratorFailure(t *testing.T) { - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - generator := failingKeyGenerator{} - _, err := 
dec.decrypt(make([]byte, 256), RSA1_5, generator) - if err != ErrCryptoFailure { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidAlgorithmsEC(t *testing.T) { - _, err := newECDHRecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newECDSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(ecEncrypterVerifier) - enc.publicKey = &ecTestKey256.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidECKeyGen(t *testing.T) { - gen := ecKeyGenerator{ - size: 16, - algID: "A128GCM", - publicKey: &ecTestKey256.PublicKey, - } - - if gen.keySize() != 16 { - t.Error("ec key generator reported incorrect key size") - } - - _, _, err := gen.genKey() - if err != nil { - t.Error("ec key generator failed to generate key", err) - } -} - -func TestInvalidECDecrypt(t *testing.T) { - dec := ecDecrypterSigner{ - privateKey: ecTestKey256, - } - - generator := randomKeyGenerator{size: 16} - - // Missing epk header - headers := rawHeader{ - Alg: string(ECDH_ES), - } - - _, err := dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with missing epk header") - } - - // Invalid epk header - headers.Epk = &JsonWebKey{} - - _, err = dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with invalid epk header") - } -} - -func TestDecryptWithIncorrectSize(t *testing.T) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - payload := make([]byte, 254) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - 
t.Error("Invalid payload size should return error") - } - - payload = make([]byte, 257) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } -} - -func TestPKCSDecryptNeverFails(t *testing.T) { - // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent - // side-channel timing attacks (Bleichenbacher attack in particular). - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - for i := 1; i < 50; i++ { - payload := make([]byte, 256) - _, err := io.ReadFull(rand.Reader, payload) - if err != nil { - t.Error("Unable to get random data:", err) - return - } - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err != nil { - t.Error("PKCS1v1.5 decrypt should never fail:", err) - return - } - } -} - -func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(32) - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 32) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - 
dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 16) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - // Do some simple scrambling - ciphertext[128] ^= 0xFF - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func TestInvalidEllipticCurve(t *testing.T) { - signer256 := ecDecrypterSigner{privateKey: ecTestKey256} - signer384 := ecDecrypterSigner{privateKey: ecTestKey384} - signer521 := ecDecrypterSigner{privateKey: ecTestKey521} - - _, err := signer256.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-256 key") - } - _, err = signer256.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not generate ES512 signature with P-256 key") - } - _, err = signer384.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-384 key") - } - _, err = signer384.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not generate ES512 signature with P-384 key") - } - _, err = signer521.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-521 key") - } - _, err = signer521.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-521 key") - } -} - -func TestInvalidECPublicKey(t *testing.T) { - // Invalid key - invalid := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("MTEx"), - Y: fromBase64Int("MTEx"), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), - } - - headers := rawHeader{ - Alg: string(ECDH_ES), - Epk: &JsonWebKey{ - Key: &invalid.PublicKey, - }, - } 
- - dec := ecDecrypterSigner{ - privateKey: ecTestKey256, - } - - _, err := dec.decryptKey(headers, nil, randomKeyGenerator{size: 16}) - if err == nil { - t.Fatal("decrypter accepted JWS with invalid ECDH public key") - } -} - -func TestInvalidAlgorithmEC(t *testing.T) { - err := ecEncrypterVerifier{publicKey: &ecTestKey256.PublicKey}.verifyPayload([]byte{}, []byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Fatal("should not accept invalid/unsupported algorithm") - } -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go deleted file mode 100644 index 126b85ce2..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. 
-func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. - ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. 
-func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures the the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("square/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("square/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("square/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go deleted file mode 100644 index 40bcb20fa..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go +++ /dev/null @@ -1,498 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "io" - "strings" - "testing" -) - -func TestInvalidInputs(t *testing.T) { - key := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - nonce := []byte{ - 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145} - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad")) - - // Changed AAD, must fail - _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID")) - if err == nil { - t.Error("must detect invalid aad") - } - - // Empty ciphertext, must fail - _, err = aead.Open(nil, nonce, []byte{}, []byte("aad")) - if err == nil { - t.Error("must detect invalid/empty ciphertext") - } - - // Corrupt ciphertext, must fail - corrupt := make([]byte, len(ciphertext)) - copy(corrupt, ciphertext) - corrupt[0] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt ciphertext") - } - - // Corrupt authtag, must fail - copy(corrupt, ciphertext) - corrupt[len(ciphertext)-1] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } - - // Truncated data, must fail - _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } -} - -func TestVectorsAESCBC128(t *testing.T) { - // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2 - plaintext := []byte{ - 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32, - 112, 114, 111, 115, 112, 101, 114, 46} - - aad := []byte{ - 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69, - 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105, - 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85, - 50, 73, 110, 48} - 
- expectedCiphertext := []byte{ - 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6, - 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143, - 112, 56, 102} - - expectedAuthtag := []byte{ - 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100, - 191} - - key := []byte{ - 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206, - 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207} - - nonce := []byte{ - 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match") - } - if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match") - } -} - -func TestVectorsAESCBC256(t *testing.T) { - // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4 - plaintext := []byte{ - 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20, - 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, - 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 0x61, 0x6c, 0x6c, 0x20, 0x69, - 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, - 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65} - - aad := []byte{ - 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63, - 
0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20, - 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73} - - expectedCiphertext := []byte{ - 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd, - 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd, - 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2, - 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b, - 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1, - 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3, - 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e, - 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b, - 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6} - - expectedAuthtag := []byte{ - 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf, - 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5} - - key := []byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f} - - nonce := []byte{ - 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if 
bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext) - } - if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag) - } -} - -func TestAESCBCRoundtrip(t *testing.T) { - key128 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - key192 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7} - - key256 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - RunRoundtrip(t, key128, nonce) - RunRoundtrip(t, key192, nonce) - RunRoundtrip(t, key256, nonce) -} - -func RunRoundtrip(t *testing.T, key, nonce []byte) { - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.NonceSize() != len(nonce) { - panic("invalid nonce") - } - - // Test pre-existing data in dst buffer - dst := []byte{15, 15, 15, 15} - plaintext := []byte{0, 0, 0, 0} - aad := []byte{4, 3, 2, 1} - - result := aead.Seal(dst, nonce, plaintext, aad) - if bytes.Compare(dst, result[:4]) != 0 { - t.Error("Existing data in dst not preserved") - } - - // Test pre-existing (empty) dst buffer with sufficient capacity - dst = make([]byte, 256)[:0] - result, err = aead.Open(dst, nonce, result[4:], aad) - if err != nil { - panic(err) - } - - if bytes.Compare(result, plaintext) != 0 { - t.Error("Plaintext does not match output") - } -} - -func TestAESCBCOverhead(t *testing.T) { - aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher) - if err != nil { - 
panic(err) - } - - if aead.Overhead() != 32 { - t.Error("CBC-HMAC reports incorrect overhead value") - } -} - -func TestPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - unpadded, err := unpadBuffer(padded, 16) - if err != nil || len(unpadded) != i { - t.Error("failed to unpad slice properly", i) - return - } - } -} - -func TestInvalidKey(t *testing.T) { - key := make([]byte, 30) - _, err := NewCBCHMAC(key, aes.NewCipher) - if err == nil { - t.Error("should not be able to instantiate CBC-HMAC with invalid key") - } -} - -func TestTruncatedCiphertext(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - data := make([]byte, 32) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - ctx := aead.(*cbcAEAD) - ct := aead.Seal(nil, nonce, data, nil) - - // Truncated ciphertext, but with correct auth tag - truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], uint64(len(ct))-2) - copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes])) - - // Open should fail - _, err = aead.Open(nil, nonce, truncated, nil) - if err == nil { - t.Error("open on truncated ciphertext should fail") - } -} - -func TestInvalidPaddingOpen(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - - // Plaintext with invalid padding - plaintext := padBuffer(make([]byte, 28), aes.BlockSize) - plaintext[len(plaintext)-1] = 0xFF - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - block, _ := aes.NewCipher(key) - cbc := cipher.NewCBCEncrypter(block, nonce) - buffer := append([]byte{}, plaintext...) 
- cbc.CryptBlocks(buffer, buffer) - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ctx := aead.(*cbcAEAD) - - // Mutated ciphertext, but with correct auth tag - size := uint64(len(buffer)) - ciphertext, tail := resize(buffer, size+(uint64(len(key))/2)) - copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size])) - - // Open should fail (b/c of invalid padding, even though tag matches) - _, err := aead.Open(nil, nonce, ciphertext, nil) - if err == nil || !strings.Contains(err.Error(), "invalid padding") { - t.Error("no or unexpected error on open with invalid padding:", err) - } -} - -func TestInvalidPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - - paddingBytes := 16 - (i % 16) - - // Mutate padding for testing - for j := 1; j <= paddingBytes; j++ { - mutated := make([]byte, len(padded)) - copy(mutated, padded) - mutated[len(mutated)-j] ^= 0xFF - - _, err := unpadBuffer(mutated, 16) - if err == nil { - t.Error("unpad on invalid padding should fail", i) - return - } - } - - // Test truncated padding - _, err := unpadBuffer(padded[:len(padded)-1], 16) - if err == nil { - t.Error("unpad on truncated padding should fail", i) - return - } - } -} - -func TestZeroLengthPadding(t *testing.T) { - data := make([]byte, 16) - data, err := unpadBuffer(data, 16) - if err == nil { - t.Error("padding with 0x00 should never be valid") - } -} - -func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Seal(nil, nonce, chunk, nil) - } -} - -func benchDecryptCBCHMAC(b *testing.B, keySize, 
chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - out := aead.Seal(nil, nonce, chunk, nil) - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Open(nil, nonce, out, nil) - } -} - -func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 1024) -} - -func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) { - 
benchEncryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 67108864) -} - -func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1032) -} - -func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 67108864) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. 
-func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go deleted file mode 100644 index 48219b3e1..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto" - "testing" -) - -// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt -func TestVectorConcatKDF(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77} - - ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101} - ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98} - - supPubInfo := []byte{0, 0, 0, 128} - supPrivInfo := []byte{} - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - out0 := make([]byte, 9) - out1 := make([]byte, 7) - - read0, err := ckdf.Read(out0) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - read1, err := ckdf.Read(out1) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - if read0+read1 != len(out0)+len(out1) { - t.Error("did not receive enough bytes from concat kdf reader") - return - } - - out := []byte{} - out = append(out, out0...) - out = append(out, out1...) 
- - if bytes.Compare(out, expected) != 0 { - t.Error("did not receive expected output from concat kdf reader") - return - } -} - -func TestCache(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - outputs := [][]byte{} - - // Read the same amount of data in different chunk sizes - chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512} - - for _, c := range chunkSizes { - out := make([]byte, 1024) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - for i := 0; i < 1024; i += c { - _, _ = reader.Read(out[i : i+c]) - } - - outputs = append(outputs, out) - } - - for i := range outputs { - if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 { - t.Error("not all outputs from KDF matched") - } - } -} - -func benchmarkKDF(b *testing.B, total int) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - out := make([]byte, total) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - b.ResetTimer() - b.SetBytes(int64(total)) - for i := 0; i < b.N; i++ { - _, _ = reader.Read(out) - } -} - -func BenchmarkConcatKDF_1k(b *testing.B) { - benchmarkKDF(b, 1024) -} - -func BenchmarkConcatKDF_64k(b *testing.B) { - benchmarkKDF(b, 65536) -} - -func BenchmarkConcatKDF_1MB(b *testing.B) { - benchmarkKDF(b, 1048576) -} - -func BenchmarkConcatKDF_64MB(b 
*testing.B) { - benchmarkKDF(b, 67108864) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go deleted file mode 100644 index f23d49e1f..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go +++ /dev/null @@ -1,62 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "crypto/ecdsa" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). 
-func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - return key -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go deleted file mode 100644 index ca2c508dd..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go +++ /dev/null @@ -1,115 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/base64" - "math/big" - "testing" -) - -// Example keys from JWA, Appendix C -var aliceKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="), - Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), -} - -var bobKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="), - Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="), - }, - D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="), -} - -// Build big int from base64-encoded string. Strips whitespace (for testing). -func fromBase64Int(data string) *big.Int { - val, err := base64.URLEncoding.DecodeString(data) - if err != nil { - panic("Invalid test data") - } - return new(big.Int).SetBytes(val) -} - -func TestVectorECDHES(t *testing.T) { - apuData := []byte("Alice") - apvData := []byte("Bob") - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - - if bytes.Compare(output, expected) != 0 { - t.Error("output did not match what we expect, got", output, "wanted", expected) - } -} - -func TestInvalidECPublicKey(t *testing.T) { - defer func() { recover() }() - - // Invalid key - invalid := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("MTEx"), - Y: fromBase64Int("MTEx"), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), - } - - DeriveECDHES("A128GCM", []byte{}, []byte{}, bobKey, &invalid.PublicKey, 16) - t.Fatal("should panic if 
public key was invalid") -} - -func BenchmarkECDHES_128(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - } -} - -func BenchmarkECDHES_192(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24) - } -} - -func BenchmarkECDHES_256(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32) - } -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go deleted file mode 100644 index 1d36d5015..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. 
-func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go deleted file mode 100644 index ceecf812b..000000000 --- 
a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go +++ /dev/null @@ -1,133 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "encoding/hex" - "testing" -) - -func TestAesKeyWrap(t *testing.T) { - // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf - kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617") - cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D") - - kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F") - cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607") - - expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1") - - block0, _ := aes.NewCipher(kek0) - block1, _ := aes.NewCipher(kek1) - block2, _ := aes.NewCipher(kek2) - - out0, _ := KeyWrap(block0, cek0) - out1, _ := KeyWrap(block1, cek1) - out2, _ := KeyWrap(block2, cek2) - - if bytes.Compare(out0, expected0) != 0 { - t.Error("output 0 not as expected, got", out0, "wanted", expected0) - } 
- - if bytes.Compare(out1, expected1) != 0 { - t.Error("output 1 not as expected, got", out1, "wanted", expected1) - } - - if bytes.Compare(out2, expected2) != 0 { - t.Error("output 2 not as expected, got", out2, "wanted", expected2) - } - - unwrap0, _ := KeyUnwrap(block0, out0) - unwrap1, _ := KeyUnwrap(block1, out1) - unwrap2, _ := KeyUnwrap(block2, out2) - - if bytes.Compare(unwrap0, cek0) != 0 { - t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0) - } - - if bytes.Compare(unwrap1, cek1) != 0 { - t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1) - } - - if bytes.Compare(unwrap2, cek2) != 0 { - t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2) - } -} - -func TestAesKeyWrapInvalid(t *testing.T) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - - // Invalid unwrap input (bit flipped) - input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - _, err := KeyUnwrap(block, input0) - if err == nil { - t.Error("key unwrap failed to detect invalid input") - } - - // Invalid unwrap input (truncated) - input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF") - - _, err = KeyUnwrap(block, input1) - if err == nil { - t.Error("key unwrap failed to detect truncated input") - } - - // Invalid wrap input (not multiple of 8) - input2, _ := hex.DecodeString("0123456789ABCD") - - _, err = KeyWrap(block, input2) - if err == nil { - t.Error("key wrap accepted invalid input") - } - -} - -func BenchmarkAesKeyWrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyWrap(block, key) - } -} - -func BenchmarkAesKeyUnwrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - input, _ := 
hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyUnwrap(block, input) - } -} diff --git a/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/gopkg.in/square/go-jose.v1/crypter.go deleted file mode 100644 index b3bdaec80..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/crypter.go +++ /dev/null @@ -1,416 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - "reflect" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) -} - -// MultiEncrypter represents an encrypter which supports multiple recipients. 
-type MultiEncrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) - AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// SetCompression sets a compression algorithm to be applied before encryption. 
-func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { - ctx.compressionAlg = compressionAlg -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := encryptionKey.(type) { - case *JsonWebKey: - keyID = encryptionKey.KeyID - rawKey = encryptionKey.Key - default: - rawKey = encryptionKey - } - - switch alg { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.AddRecipient(alg, encryptionKey) - return encrypter, err - } -} - -// NewMultiEncrypter creates a 
multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { - var recipient recipientKeyInfo - - switch alg { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) - } - - recipient, err = makeJWERecipient(alg, encryptionKey) - - if err == nil { - ctx.recipients = append(ctx.recipients, recipient) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case *JsonWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - if err == nil && encryptionKey.KeyID != "" { - recipient.keyID = encryptionKey.KeyID - } - return recipient, err - default: - return recipientKeyInfo{}, ErrUnsupportedKeyType - } -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case *JsonWebKey: - return 
newDecrypter(decryptionKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) { - obj := &JsonWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{ - Enc: ctx.contentAlg, - } - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - recipient.header.Alg = string(info.keyAlg) - if info.keyID != "" { - recipient.header.Kid = info.keyID - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. - obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - obj.protected.Zip = ctx.compressionAlg - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient -// decryption use DecryptMulti instead. 
-func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") - } - - if len(headers.Crit) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.Enc) - if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if obj.protected.Zip != "" { - plaintext, err = decompress(obj.protected.Zip, plaintext) - } - - return plaintext, err -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. 
-func (obj JsonWebEncryption) DecryptMulti(decryptionKey interface{}) (int, JoseHeader, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - if len(globalHeaders.Crit) > 0 { - return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return -1, JoseHeader{}, nil, err - } - - cipher := getContentCipher(globalHeaders.Enc) - if cipher == nil { - return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(globalHeaders.Enc)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil || err != nil { - return -1, JoseHeader{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if obj.protected.Zip != "" { - plaintext, err = decompress(obj.protected.Zip, plaintext) - } - - return index, headers.sanitized(), plaintext, err -} diff --git a/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/gopkg.in/square/go-jose.v1/crypter_test.go deleted file mode 100644 index 431f65378..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/crypter_test.go +++ /dev/null @@ -1,785 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "fmt" - "io" - "testing" -) - -// We generate only a single RSA and EC key for testing, speeds up tests. -var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048) - -var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) -var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) -var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - -func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - return fmt.Errorf("error on new encrypter: %s", err) - } - - enc.SetCompression(compressionAlg) - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.EncryptWithAuthData(input, aad) - if err != nil { - return fmt.Errorf("error in encrypt: %s", err) - } - - msg, err := serializer(obj) - if err != nil { - return fmt.Errorf("error in serializer: %s", err) - } - - parsed, err := ParseEncrypted(msg) - if err != nil { - return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg) - } - - // (Maybe) mangle object - skip := corrupter(parsed) - if skip { - return fmt.Errorf("corrupter indicated message should be skipped") - } - - if bytes.Compare(parsed.GetAuthData(), aad) != 0 { - return fmt.Errorf("auth data in 
parsed object does not match") - } - - output, err := parsed.Decrypt(decryptionKey) - if err != nil { - return fmt.Errorf("error on decrypt: %s", err) - } - - if bytes.Compare(input, output) != 0 { - return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input) - } - - return nil -} - -func TestRoundtripsJWE(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{ - DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW, - RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err != nil { - t.Error(err, alg, enc, zip, i) - } - } - } - } - } - } -} - -func TestRoundtripsJWECorrupted(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return 
obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - bitflip := func(slice []byte) bool { - if len(slice) > 0 { - slice[0] ^= 0xFF - return false - } - return true - } - - corrupters := []func(*JsonWebEncryption) bool{ - func(obj *JsonWebEncryption) bool { - // Set invalid ciphertext - return bitflip(obj.ciphertext) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid auth tag - return bitflip(obj.tag) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid AAD - return bitflip(obj.aad) - }, - func(obj *JsonWebEncryption) bool { - // Mess with encrypted key - return bitflip(obj.recipients[0].encryptedKey) - }, - func(obj *JsonWebEncryption) bool { - // Mess with GCM-KW auth tag - return bitflip(obj.protected.Tag.bytes()) - }, - } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - for j, corrupter := range corrupters { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err == nil { - t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j) - } - } - } - } - } - } - } -} - -func TestEncrypterWithJWKAndKeyID(t *testing.T) { - enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{ - KeyID: "test-id", - Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - }) - if err != nil { - t.Error(err) - } - - ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet")) - - serialized1, _ := ciphertext.CompactSerialize() - serialized2 := ciphertext.FullSerialize() - - parsed1, _ := ParseEncrypted(serialized1) - parsed2, _ := ParseEncrypted(serialized2) - - if parsed1.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id 
from JWK, but found '%s' instead", parsed1.Header.KeyID) - } - if parsed2.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID) - } -} - -func TestEncrypterWithBrokenRand(t *testing.T) { - keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - - serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() } - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Break rand reader - readers := []func() io.Reader{ - // Totally broken - func() io.Reader { return bytes.NewReader([]byte{}) }, - // Not enough bytes - func() io.Reader { return io.LimitReader(rand.Reader, 20) }, - } - - defer resetRandReader() - - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for i, getReader := range readers { - randReader = getReader() - err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec) - if err == nil { - t.Error("encrypter should fail if rand is broken", i) - } - } - } - } - } -} - -func TestNewEncrypterErrors(t *testing.T) { - _, err := NewEncrypter("XYZ", "XYZ", nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid cipher") - } - - _, err = NewMultiEncrypter("XYZ") - if err == nil { - t.Error("was able to instantiate multi-encrypter with invalid cipher") - } - - _, err = NewEncrypter(DIRECT, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid direct key") - } - - _, err = NewEncrypter(ECDH_ES, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid EC key") - } -} - -func TestMultiRecipientJWE(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - err = enc.AddRecipient(RSA_OAEP, 
&rsaTestKey.PublicKey) - if err != nil { - t.Fatal("error when adding RSA recipient", err) - } - - sharedKey := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - err = enc.AddRecipient(A256GCMKW, sharedKey) - if err != nil { - t.Fatal("error when adding AES recipient: ", err) - } - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.Encrypt(input) - if err != nil { - t.Fatal("error in encrypt: ", err) - } - - msg := obj.FullSerialize() - - parsed, err := ParseEncrypted(msg) - if err != nil { - t.Fatal("error in parse: ", err) - } - - i, _, output, err := parsed.DecryptMulti(rsaTestKey) - if err != nil { - t.Fatal("error on decrypt with RSA: ", err) - } - - if i != 0 { - t.Fatal("recipient index should be 0 for RSA key") - } - - if bytes.Compare(input, output) != 0 { - t.Fatal("Decrypted output does not match input: ", output, input) - } - - i, _, output, err = parsed.DecryptMulti(sharedKey) - if err != nil { - t.Fatal("error on decrypt with AES: ", err) - } - - if i != 1 { - t.Fatal("recipient index should be 1 for shared key") - } - - if bytes.Compare(input, output) != 0 { - t.Fatal("Decrypted output does not match input", output, input) - } -} - -func TestMultiRecipientErrors(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - input := []byte("Lorem ipsum dolor sit amet") - _, err = enc.Encrypt(input) - if err == nil { - t.Error("should fail when encrypting to zero recipients") - } - - err = enc.AddRecipient(DIRECT, nil) - if err == nil { - t.Error("should reject DIRECT mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(ECDH_ES, nil) - if err == nil { - t.Error("should reject ECDH_ES mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(RSA1_5, nil) - if err == nil { - t.Error("should reject invalid recipient key") - } -} - -type testKey struct { - enc, dec interface{} -} - -func 
symmetricTestKey(size int) []testKey { - key, _, _ := randomKeyGenerator{size: size}.genKey() - - return []testKey{ - testKey{ - enc: key, - dec: key, - }, - testKey{ - enc: &JsonWebKey{KeyID: "test", Key: key}, - dec: &JsonWebKey{KeyID: "test", Key: key}, - }, - } -} - -func generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey { - switch keyAlg { - case DIRECT: - return symmetricTestKey(getContentCipher(encAlg).keySize()) - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - return []testKey{ - testKey{ - dec: ecTestKey256, - enc: &ecTestKey256.PublicKey, - }, - testKey{ - dec: ecTestKey384, - enc: &ecTestKey384.PublicKey, - }, - testKey{ - dec: ecTestKey521, - enc: &ecTestKey521.PublicKey, - }, - testKey{ - dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256}, - enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey}, - }, - } - case A128GCMKW, A128KW: - return symmetricTestKey(16) - case A192GCMKW, A192KW: - return symmetricTestKey(24) - case A256GCMKW, A256KW: - return symmetricTestKey(32) - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - return []testKey{testKey{ - dec: rsaTestKey, - enc: &rsaTestKey.PublicKey, - }} - } - - panic("Must update test case") -} - -func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, pub interface{}) { - serializer := func(obj *JsonWebEncryption) (string, error) { - return obj.CompactSerialize() - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv) - if err != nil { - b.Error(err) - } - } -} - -var ( - chunks = map[string][]byte{ - "1B": make([]byte, 1), - "64B": make([]byte, 64), - "1KB": make([]byte, 1024), - "64KB": make([]byte, 65536), - "1MB": make([]byte, 1048576), - "64MB": make([]byte, 67108864), - } - - symKey, _, _ = randomKeyGenerator{size: 32}.genKey() - - encrypters = map[string]Encrypter{ - 
"OAEPAndGCM": mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey), - "PKCSAndGCM": mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey), - "OAEPAndCBC": mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey), - "PKCSAndCBC": mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey), - "DirectGCM128": mustEncrypter(DIRECT, A128GCM, symKey), - "DirectCBC128": mustEncrypter(DIRECT, A128CBC_HS256, symKey), - "DirectGCM256": mustEncrypter(DIRECT, A256GCM, symKey), - "DirectCBC256": mustEncrypter(DIRECT, A256CBC_HS512, symKey), - "AESKWAndGCM128": mustEncrypter(A128KW, A128GCM, symKey), - "AESKWAndCBC256": mustEncrypter(A256KW, A256GCM, symKey), - "ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey), - "ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey), - "ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey), - } -) - -func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B) { benchEncrypt("1B", "OAEPAndGCM", b) } -func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B) { benchEncrypt("64B", "OAEPAndGCM", b) } -func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B) { benchEncrypt("1B", "PKCSAndGCM", b) } -func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B) { benchEncrypt("64B", "PKCSAndGCM", b) } -func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1MB", "PKCSAndGCM", b) } -func 
BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B) { benchEncrypt("1B", "OAEPAndCBC", b) } -func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B) { benchEncrypt("64B", "OAEPAndCBC", b) } -func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B) { benchEncrypt("1B", "PKCSAndCBC", b) } -func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B) { benchEncrypt("64B", "PKCSAndCBC", b) } -func BenchmarkEncrypt1KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B) { benchEncrypt("1B", "DirectGCM128", b) } -func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B) { benchEncrypt("64B", "DirectGCM128", b) } -func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B) { benchEncrypt("1KB", "DirectGCM128", b) } -func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) } -func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B) { benchEncrypt("1MB", "DirectGCM128", b) } -func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) } - -func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B) { benchEncrypt("1B", "DirectCBC128", b) } -func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B) { 
benchEncrypt("64B", "DirectCBC128", b) } -func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B) { benchEncrypt("1KB", "DirectCBC128", b) } -func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) } -func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B) { benchEncrypt("1MB", "DirectCBC128", b) } -func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) } - -func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B) { benchEncrypt("1B", "DirectGCM256", b) } -func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B) { benchEncrypt("64B", "DirectGCM256", b) } -func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B) { benchEncrypt("1KB", "DirectGCM256", b) } -func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) } -func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B) { benchEncrypt("1MB", "DirectGCM256", b) } -func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) } - -func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B) { benchEncrypt("1B", "DirectCBC256", b) } -func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B) { benchEncrypt("64B", "DirectCBC256", b) } -func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B) { benchEncrypt("1KB", "DirectCBC256", b) } -func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", "DirectCBC256", b) } -func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B) { benchEncrypt("1MB", "DirectCBC256", b) } -func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) } - -func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64KBWithAESKWAndGCM128(b 
*testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) 
{ - benchEncrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchEncrypt(chunkKey, primKey string, b *testing.B) { - data, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - b.SetBytes(int64(len(data))) - for i := 0; i < b.N; i++ { - enc.Encrypt(data) - } -} - -var ( - decryptionKeys = map[string]interface{}{ - "OAEPAndGCM": rsaTestKey, - "PKCSAndGCM": rsaTestKey, - "OAEPAndCBC": rsaTestKey, - "PKCSAndCBC": rsaTestKey, - - "DirectGCM128": symKey, - "DirectCBC128": symKey, - "DirectGCM256": symKey, - "DirectCBC256": symKey, - - "AESKWAndGCM128": symKey, - "AESKWAndCBC256": symKey, - - "ECDHOnP256AndGCM128": ecTestKey256, - "ECDHOnP384AndGCM128": ecTestKey384, - "ECDHOnP521AndGCM128": ecTestKey521, - } -) - -func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B) { benchDecrypt("1B", "OAEPAndGCM", b) } -func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B) { benchDecrypt("64B", "OAEPAndGCM", b) } -func 
BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B) { benchDecrypt("1B", "PKCSAndGCM", b) } -func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B) { benchDecrypt("64B", "PKCSAndGCM", b) } -func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B) { benchDecrypt("1B", "OAEPAndCBC", b) } -func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B) { benchDecrypt("64B", "OAEPAndCBC", b) } -func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B) { benchDecrypt("1B", "PKCSAndCBC", b) } -func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B) { benchDecrypt("64B", "PKCSAndCBC", b) } -func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1MB", "PKCSAndCBC", 
b) } -func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B) { benchDecrypt("1B", "DirectGCM128", b) } -func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B) { benchDecrypt("64B", "DirectGCM128", b) } -func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B) { benchDecrypt("1KB", "DirectGCM128", b) } -func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) } -func BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B) { benchDecrypt("1MB", "DirectGCM128", b) } -func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) } - -func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B) { benchDecrypt("1B", "DirectCBC128", b) } -func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B) { benchDecrypt("64B", "DirectCBC128", b) } -func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B) { benchDecrypt("1KB", "DirectCBC128", b) } -func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) } -func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B) { benchDecrypt("1MB", "DirectCBC128", b) } -func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) } - -func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B) { benchDecrypt("1B", "DirectGCM256", b) } -func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B) { benchDecrypt("64B", "DirectGCM256", b) } -func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B) { benchDecrypt("1KB", "DirectGCM256", b) } -func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) } -func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B) { benchDecrypt("1MB", "DirectGCM256", b) } -func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) } - -func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B) { benchDecrypt("1B", "DirectCBC256", b) } 
-func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B) { benchDecrypt("64B", "DirectCBC256", b) } -func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B) { benchDecrypt("1KB", "DirectCBC256", b) } -func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) } -func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B) { benchDecrypt("1MB", "DirectCBC256", b) } -func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) } - -func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP256AndGCM128", b) -} -func 
BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchDecrypt(chunkKey, primKey string, b *testing.B) { - chunk, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size 
%s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - dec, ok := decryptionKeys[primKey] - if !ok { - b.Fatalf("unknown decryption key %s", primKey) - } - - data, err := enc.Encrypt(chunk) - if err != nil { - b.Fatal(err) - } - - b.SetBytes(int64(len(chunk))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - data.Decrypt(dec) - } -} - -func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - panic(err) - } - return enc -} diff --git a/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/gopkg.in/square/go-jose.v1/doc.go deleted file mode 100644 index b4cd1e989..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on -encryption and signing based on the JSON Web Encryption and JSON Web Signature -standards. The library supports both the compact and full serialization -formats, and has optional support for multiple recipients. 
- -*/ -package jose // import "gopkg.in/square/go-jose.v1" diff --git a/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/gopkg.in/square/go-jose.v1/doc_test.go deleted file mode 100644 index 50468295d..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/doc_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -// Dummy encrypter for use in examples -var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{}) - -func Example_jWE() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would - // indicate that the selected algorithm(s) are not currently supported. - publicKey := &privateKey.PublicKey - encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) - if err != nil { - panic(err) - } - - // Encrypt a sample plaintext. Calling the encrypter returns an encrypted - // JWE object, which can then be serialized for output afterwards. An error - // would indicate a problem in an underlying cryptographic primitive. 
- var plaintext = []byte("Lorem ipsum dolor sit amet") - object, err := encrypter.Encrypt(plaintext) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, encrypted JWE object. An error would indicate that - // the given input did not represent a valid message. - object, err = ParseEncrypted(serialized) - if err != nil { - panic(err) - } - - // Now we can decrypt and get back our original plaintext. An error here - // would indicate the the message failed to decrypt, e.g. because the auth - // tag was broken or the message was tampered with. - decrypted, err := object.Decrypt(privateKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(decrypted)) - // output: Lorem ipsum dolor sit amet -} - -func Example_jWS() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. - signer, err := NewSigner(PS512, privateKey) - if err != nil { - panic(err) - } - - // Sign a sample payload. Calling the signer returns a protected JWS object, - // which can then be serialized for output afterwards. An error would - // indicate a problem in an underlying cryptographic primitive. - var payload = []byte("Lorem ipsum dolor sit amet") - object, err := signer.Sign(payload) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. 
- serialized := object.FullSerialize() - - // Parse the serialized, protected JWS object. An error would indicate that - // the given input did not represent a valid message. - object, err = ParseSigned(serialized) - if err != nil { - panic(err) - } - - // Now we can verify the signature on the payload. An error here would - // indicate the the message failed to verify, e.g. because the signature was - // broken or the message was tampered with. - output, err := object.Verify(&privateKey.PublicKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(output)) - // output: Lorem ipsum dolor sit amet -} - -func ExampleNewEncrypter_publicKey() { - var publicKey *rsa.PublicKey - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. - NewEncrypter(RSA_OAEP, A128GCM, publicKey) - - // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC. - NewEncrypter(RSA1_5, A128CBC_HS256, publicKey) -} - -func ExampleNewEncrypter_symmetric() { - var sharedKey []byte - - // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap. - NewEncrypter(A128GCMKW, A128GCM, sharedKey) - - // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping. - NewEncrypter(DIRECT, A256GCM, sharedKey) -} - -func ExampleNewSigner_publicKey() { - var rsaPrivateKey *rsa.PrivateKey - var ecdsaPrivateKey *ecdsa.PrivateKey - - // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256. - NewSigner(RS256, rsaPrivateKey) - - // Instantiate a signer using ECDSA with SHA-384. - NewSigner(ES384, ecdsaPrivateKey) -} - -func ExampleNewSigner_symmetric() { - var sharedKey []byte - - // Instantiate an signer using HMAC-SHA256. - NewSigner(HS256, sharedKey) - - // Instantiate an signer using HMAC-SHA512. - NewSigner(HS512, sharedKey) -} - -func ExampleNewMultiEncrypter() { - var publicKey *rsa.PublicKey - var sharedKey []byte - - // Instantiate an encrypter using AES-GCM. 
- encrypter, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - // Add a recipient using a shared key with AES-GCM key wap - err = encrypter.AddRecipient(A128GCMKW, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA public key with RSA-OAEP - err = encrypter.AddRecipient(RSA_OAEP, publicKey) - if err != nil { - panic(err) - } -} - -func ExampleNewMultiSigner() { - var privateKey *rsa.PrivateKey - var sharedKey []byte - - // Instantiate a signer for multiple recipients. - signer := NewMultiSigner() - - // Add a recipient using a shared key with HMAC-SHA256 - err := signer.AddRecipient(HS256, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA private key with RSASSA-PSS with SHA384 - err = signer.AddRecipient(PS384, privateKey) - if err != nil { - panic(err) - } -} - -func ExampleEncrypter_encrypt() { - // Encrypt a plaintext in order to get an encrypted JWE object. - var plaintext = []byte("This is a secret message") - - encrypter.Encrypt(plaintext) -} - -func ExampleEncrypter_encryptWithAuthData() { - // Encrypt a plaintext in order to get an encrypted JWE object. Also attach - // some additional authenticated data (AAD) to the object. Note that objects - // with attached AAD can only be represented using full serialization. - var plaintext = []byte("This is a secret message") - var aad = []byte("This is authenticated, but public data") - - encrypter.EncryptWithAuthData(plaintext, aad) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/gopkg.in/square/go-jose.v1/encoding.go deleted file mode 100644 index dde0a42db..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/encoding.go +++ /dev/null @@ -1,193 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "io" - "math/big" - "regexp" - "strings" - - "gopkg.in/square/go-jose.v1/json" -) - -var stripWhitespaceRegex = regexp.MustCompile("\\s") - -// Url-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// Url-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. 
- // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - return stripWhitespaceRegex.ReplaceAllString(data, "") -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Compress with DEFLATE -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// Decompress with DEFLATE -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - _, err := io.Copy(output, reader) - if err != nil { - return nil, err - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
-type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64URLDecode(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64URLEncode(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/gopkg.in/square/go-jose.v1/encoding_test.go deleted file mode 100644 index e2f8d979c..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/encoding_test.go +++ /dev/null @@ -1,173 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "strings" - "testing" -) - -func TestBase64URLEncode(t *testing.T) { - // Test arrays with various sizes - if base64URLEncode([]byte{}) != "" { - t.Error("failed to encode empty array") - } - - if base64URLEncode([]byte{0}) != "AA" { - t.Error("failed to encode [0x00]") - } - - if base64URLEncode([]byte{0, 1}) != "AAE" { - t.Error("failed to encode [0x00, 0x01]") - } - - if base64URLEncode([]byte{0, 1, 2}) != "AAEC" { - t.Error("failed to encode [0x00, 0x01, 0x02]") - } - - if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" { - t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestBase64URLDecode(t *testing.T) { - // Test arrays with various sizes - val, err := base64URLDecode("") - if err != nil || !bytes.Equal(val, []byte{}) { - t.Error("failed to decode empty array") - } - - val, err = base64URLDecode("AA") - if err != nil || !bytes.Equal(val, []byte{0}) { - t.Error("failed to decode [0x00]") - } - - val, err = base64URLDecode("AAE") - if err != nil || !bytes.Equal(val, []byte{0, 1}) { - t.Error("failed to decode [0x00, 0x01]") - } - - val, err = base64URLDecode("AAEC") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) { - t.Error("failed to decode [0x00, 0x01, 0x02]") - } - - val, err = base64URLDecode("AAECAw") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) { - t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestDeflateRoundtrip(t *testing.T) { - original := []byte("Lorem ipsum dolor sit amet") - - compressed, err := deflate(original) - if err != 
nil { - panic(err) - } - - output, err := inflate(compressed) - if err != nil { - panic(err) - } - - if bytes.Compare(output, original) != 0 { - t.Error("Input and output do not match") - } -} - -func TestInvalidCompression(t *testing.T) { - _, err := compress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress(DEFLATE, []byte{1, 2, 3, 4}) - if err == nil { - t.Error("should not accept invalid data") - } -} - -func TestByteBufferTrim(t *testing.T) { - buf := newBufferFromInt(1) - if !bytes.Equal(buf.data, []byte{1}) { - t.Error("Byte buffer for integer '1' should contain [0x01]") - } - - buf = newBufferFromInt(65537) - if !bytes.Equal(buf.data, []byte{1, 0, 1}) { - t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]") - } -} - -func TestFixedSizeBuffer(t *testing.T) { - data0 := []byte{} - data1 := []byte{1} - data2 := []byte{1, 2} - data3 := []byte{1, 2, 3} - data4 := []byte{1, 2, 3, 4} - - buf0 := newFixedSizeBuffer(data0, 4) - buf1 := newFixedSizeBuffer(data1, 4) - buf2 := newFixedSizeBuffer(data2, 4) - buf3 := newFixedSizeBuffer(data3, 4) - buf4 := newFixedSizeBuffer(data4, 4) - - if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) { - t.Error("Invalid padded buffer for buf0") - } - if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) { - t.Error("Invalid padded buffer for buf1") - } - if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) { - t.Error("Invalid padded buffer for buf2") - } - if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) { - t.Error("Invalid padded buffer for buf3") - } - if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) { - t.Error("Invalid padded buffer for buf4") - } -} - -func TestSerializeJSONRejectsNil(t *testing.T) { - defer func() { - r := recover() - if r == nil || !strings.Contains(r.(string), "nil pointer") { - t.Error("serialize function should not accept nil 
pointer") - } - }() - - mustSerializeJSON(nil) -} - -func TestFixedSizeBufferTooLarge(t *testing.T) { - defer func() { - r := recover() - if r == nil { - t.Error("should not be able to create fixed size buffer with oversized data") - } - }() - - newFixedSizeBuffer(make([]byte, 2), 1) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/jose-util/README.md b/vendor/gopkg.in/square/go-jose.v1/jose-util/README.md deleted file mode 100644 index 6cfe6a718..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/jose-util/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# JOSE CLI - -The `jose-util` command line utility allows for encryption, decryption, signing -and verification of JOSE messages. Its main purpose is to facilitate dealing -with JOSE messages when testing or debugging. - -## Usage - -The utility includes the subcommands `encrypt`, `decrypt`, `sign`, `verify` and -`expand`. Examples for each command can be found below. - -Algorithms are selected via the `--alg` and `--enc` flags, which influence the -`alg` and `enc` headers in respectively. For JWE, `--alg` specifies the key -managment algorithm (e.g. `RSA-OAEP`) and `--enc` specifies the content -encryption algorithm (e.g. `A128GCM`). For JWS, `--alg` specifies the -signature algorithm (e.g. `PS256`). - -Input and output files can be specified via the `--in` and `--out` flags. -Either flag can be omitted, in which case `jose-util` uses stdin/stdout for -input/output respectively. By default each command will output a compact -message, but it's possible to get the full serialization by supplying the -`--full` flag. - -Keys are specified via the `--key` flag. Supported key types are naked RSA/EC -keys and X.509 certificates with embedded RSA/EC keys. Keys must be in PEM -or DER formats. - -## Examples - -### Encrypt - -Takes a plaintext as input, encrypts, and prints the encrypted message. 
- - jose-util encrypt -k public-key.pem --alg RSA-OAEP --enc A128GCM - -### Decrypt - -Takes an encrypted message (JWE) as input, decrypts, and prints the plaintext. - - jose-util decrypt -k private-key.pem - -### Sign - -Takes a payload as input, signs it, and prints the signed message with the embedded payload. - - jose-util sign -k private-key.pem --alg PS256 - -### Verify - -Reads a signed message (JWS), verifies it, and extracts the payload. - - jose-util verify -k public-key.pem - -### Expand - -Expands a compact message to the full serialization format. - - jose-util expand --format JWE # Expands a compact JWE to full format - jose-util expand --format JWS # Expands a compact JWS to full format diff --git a/vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t b/vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t deleted file mode 100644 index c0d747bb0..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/jose-util/jose-util.t +++ /dev/null @@ -1,94 +0,0 @@ -Set up test keys. 
- - $ cat > rsa.pub < -----BEGIN PUBLIC KEY----- - > MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAslWybuiNYR7uOgKuvaBw - > qVk8saEutKhOAaW+3hWF65gJei+ZV8QFfYDxs9ZaRZlWAUMtncQPnw7ZQlXO9ogN - > 5cMcN50C6qMOOZzghK7danalhF5lUETC4Hk3Eisbi/PR3IfVyXaRmqL6X66MKj/J - > AKyD9NFIDVy52K8A198Jojnrw2+XXQW72U68fZtvlyl/BTBWQ9Re5JSTpEcVmpCR - > 8FrFc0RPMBm+G5dRs08vvhZNiTT2JACO5V+J5ZrgP3s5hnGFcQFZgDnXLInDUdoi - > 1MuCjaAU0ta8/08pHMijNix5kFofdPEB954MiZ9k4kQ5/utt02I9x2ssHqw71ojj - > vwIDAQAB - > -----END PUBLIC KEY----- - > EOF - - $ cat > rsa.key < -----BEGIN RSA PRIVATE KEY----- - > MIIEogIBAAKCAQEAslWybuiNYR7uOgKuvaBwqVk8saEutKhOAaW+3hWF65gJei+Z - > V8QFfYDxs9ZaRZlWAUMtncQPnw7ZQlXO9ogN5cMcN50C6qMOOZzghK7danalhF5l - > UETC4Hk3Eisbi/PR3IfVyXaRmqL6X66MKj/JAKyD9NFIDVy52K8A198Jojnrw2+X - > XQW72U68fZtvlyl/BTBWQ9Re5JSTpEcVmpCR8FrFc0RPMBm+G5dRs08vvhZNiTT2 - > JACO5V+J5ZrgP3s5hnGFcQFZgDnXLInDUdoi1MuCjaAU0ta8/08pHMijNix5kFof - > dPEB954MiZ9k4kQ5/utt02I9x2ssHqw71ojjvwIDAQABAoIBABrYDYDmXom1BzUS - > PE1s/ihvt1QhqA8nmn5i/aUeZkc9XofW7GUqq4zlwPxKEtKRL0IHY7Fw1s0hhhCX - > LA0uE7F3OiMg7lR1cOm5NI6kZ83jyCxxrRx1DUSO2nxQotfhPsDMbaDiyS4WxEts - > 0cp2SYJhdYd/jTH9uDfmt+DGwQN7Jixio1Dj3vwB7krDY+mdre4SFY7Gbk9VxkDg - > LgCLMoq52m+wYufP8CTgpKFpMb2/yJrbLhuJxYZrJ3qd/oYo/91k6v7xlBKEOkwD - > 2veGk9Dqi8YPNxaRktTEjnZb6ybhezat93+VVxq4Oem3wMwou1SfXrSUKtgM/p2H - > vfw/76ECgYEA2fNL9tC8u9M0wjA+kvvtDG96qO6O66Hksssy6RWInD+Iqk3MtHQt - > LeoCjvX+zERqwOb6SI6empk5pZ9E3/9vJ0dBqkxx3nqn4M/nRWnExGgngJsL959t - > f50cdxva8y1RjNhT4kCwTrupX/TP8lAG8SfG1Alo2VFR8iWd8hDQcTECgYEA0Xfj - > EgqAsVh4U0s3lFxKjOepEyp0G1Imty5J16SvcOEAD1Mrmz94aSSp0bYhXNVdbf7n - > Rk77htWC7SE29fGjOzZRS76wxj/SJHF+rktHB2Zt23k1jBeZ4uLMPMnGLY/BJ099 - > 5DTGo0yU0rrPbyXosx+ukfQLAHFuggX4RNeM5+8CgYB7M1J/hGMLcUpjcs4MXCgV - > XXbiw2c6v1r9zmtK4odEe42PZ0cNwpY/XAZyNZAAe7Q0stxL44K4NWEmxC80x7lX - > ZKozz96WOpNnO16qGC3IMHAT/JD5Or+04WTT14Ue7UEp8qcIQDTpbJ9DxKk/eglS - > jH+SIHeKULOXw7fSu7p4IQKBgBnyVchIUMSnBtCagpn4DKwDjif3nEY+GNmb/D2g - > 
ArNiy5UaYk5qwEmV5ws5GkzbiSU07AUDh5ieHgetk5dHhUayZcOSLWeBRFCLVnvU - > i0nZYEZNb1qZGdDG8zGcdNXz9qMd76Qy/WAA/nZT+Zn1AiweAovFxQ8a/etRPf2Z - > DbU1AoGAHpCgP7B/4GTBe49H0AQueQHBn4RIkgqMy9xiMeR+U+U0vaY0TlfLhnX+ - > 5PkNfkPXohXlfL7pxwZNYa6FZhCAubzvhKCdUASivkoGaIEk6g1VTVYS/eDVQ4CA - > slfl+elXtLq/l1kQ8C14jlHrQzSXx4PQvjDEnAmaHSJNz4mP9Fg= - > -----END RSA PRIVATE KEY----- - > EOF - - $ cat > ec.pub < -----BEGIN PUBLIC KEY----- - > MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE9yoUEAgxTd9svwe9oPqjhcP+f2jcdTL2 - > Wq8Aw2v9ht1dBy00tFRPNrCxFCkvMcJFhSPoDUV5NL7zfh3/psiSNYziGPrWEJYf - > gmYihjSeoOf0ru1erpBrTflImPrMftCy - > -----END PUBLIC KEY----- - > EOF - - $ cat > ec.key < -----BEGIN EC PRIVATE KEY----- - > MIGkAgEBBDDvoj/bM1HokUjYWO/IDFs26Jo0GIFtU3tMQQu7ZabKscDMK3dZA0mK - > v97ij7BBFbCgBwYFK4EEACKhZANiAAT3KhQQCDFN32y/B72g+qOFw/5/aNx1MvZa - > rwDDa/2G3V0HLTS0VE82sLEUKS8xwkWFI+gNRXk0vvN+Hf+myJI1jOIY+tYQlh+C - > ZiKGNJ6g5/Su7V6ukGtN+UiY+sx+0LI= - > -----END EC PRIVATE KEY----- - > EOF - -Encrypt and then decrypt a test message (RSA). - - $ echo "Lorem ipsum dolor sit amet" | - > jose-util encrypt --alg RSA-OAEP --enc A128GCM --key rsa.pub | - > jose-util decrypt --key rsa.key - Lorem ipsum dolor sit amet - -Encrypt and then decrypt a test message (EC). - - $ echo "Lorem ipsum dolor sit amet" | - > jose-util encrypt --alg ECDH-ES+A128KW --enc A128GCM --key ec.pub | - > jose-util decrypt --key ec.key - Lorem ipsum dolor sit amet - -Sign and verify a test message (RSA). - - $ echo "Lorem ipsum dolor sit amet" | - > jose-util sign --alg PS256 --key rsa.key | - > jose-util verify --key rsa.pub - Lorem ipsum dolor sit amet - -Sign and verify a test message (EC). - - $ echo "Lorem ipsum dolor sit amet" | - > jose-util sign --alg ES384 --key ec.key | - > jose-util verify --key ec.pub - Lorem ipsum dolor sit amet - -Expand a compact message to full format. 
- - $ echo "eyJhbGciOiJFUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQK.QPU35XY913Im7ZEaN2yHykfbtPqjHZvYp-lV8OcTAJZs67bJFSdTSkQhQWE9ch6tvYrj_7py6HKaWVFLll_s_Rm6bmwq3JszsHrIvFFm1NydruYHhvAnx7rjYiqwOu0W" | - > jose-util expand --format JWS - {"payload":"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQK","protected":"eyJhbGciOiJFUzM4NCJ9","signature":"QPU35XY913Im7ZEaN2yHykfbtPqjHZvYp-lV8OcTAJZs67bJFSdTSkQhQWE9ch6tvYrj_7py6HKaWVFLll_s_Rm6bmwq3JszsHrIvFFm1NydruYHhvAnx7rjYiqwOu0W"} diff --git a/vendor/gopkg.in/square/go-jose.v1/jose-util/main.go b/vendor/gopkg.in/square/go-jose.v1/jose-util/main.go deleted file mode 100644 index 7ae93ee76..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/jose-util/main.go +++ /dev/null @@ -1,189 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "fmt" - "io/ioutil" - "os" - - "gopkg.in/alecthomas/kingpin.v2" - "gopkg.in/square/go-jose.v1" -) - -var ( - app = kingpin.New("jose-util", "A command-line utility for dealing with JOSE objects.") - - keyFile = app.Flag("key", "Path to key file (PEM or DER-encoded)").ExistingFile() - inFile = app.Flag("in", "Path to input file (stdin if missing)").ExistingFile() - outFile = app.Flag("out", "Path to output file (stdout if missing)").ExistingFile() - - encryptCommand = app.Command("encrypt", "Encrypt a plaintext, output ciphertext.") - algFlag = encryptCommand.Flag("alg", "Key management algorithm (e.g. 
RSA-OAEP)").Required().String() - encFlag = encryptCommand.Flag("enc", "Content encryption algorithm (e.g. A128GCM)").Required().String() - - decryptCommand = app.Command("decrypt", "Decrypt a ciphertext, output plaintext.") - - signCommand = app.Command("sign", "Sign a payload, output signed message.") - sigAlgFlag = signCommand.Flag("alg", "Key management algorithm (e.g. RSA-OAEP)").Required().String() - - verifyCommand = app.Command("verify", "Verify a signed message, output payload.") - - expandCommand = app.Command("expand", "Expand JOSE object to full serialization format.") - formatFlag = expandCommand.Flag("format", "Type of message to expand (JWS or JWE, defaults to JWE)").String() - - full = app.Flag("full", "Use full serialization format (instead of compact)").Bool() -) - -func main() { - app.Version("v1") - - command := kingpin.MustParse(app.Parse(os.Args[1:])) - - var keyBytes []byte - var err error - if command != "expand" { - keyBytes, err = ioutil.ReadFile(*keyFile) - exitOnError(err, "unable to read key file") - } - - switch command { - case "encrypt": - pub, err := jose.LoadPublicKey(keyBytes) - exitOnError(err, "unable to read public key") - - alg := jose.KeyAlgorithm(*algFlag) - enc := jose.ContentEncryption(*encFlag) - - crypter, err := jose.NewEncrypter(alg, enc, pub) - exitOnError(err, "unable to instantiate encrypter") - - obj, err := crypter.Encrypt(readInput(*inFile)) - exitOnError(err, "unable to encrypt") - - var msg string - if *full { - msg = obj.FullSerialize() - } else { - msg, err = obj.CompactSerialize() - exitOnError(err, "unable to serialize message") - } - - writeOutput(*outFile, []byte(msg)) - case "decrypt": - priv, err := jose.LoadPrivateKey(keyBytes) - exitOnError(err, "unable to read private key") - - obj, err := jose.ParseEncrypted(string(readInput(*inFile))) - exitOnError(err, "unable to parse message") - - plaintext, err := obj.Decrypt(priv) - exitOnError(err, "unable to decrypt message") - - writeOutput(*outFile, 
plaintext) - case "sign": - signingKey, err := jose.LoadPrivateKey(keyBytes) - exitOnError(err, "unable to read private key") - - alg := jose.SignatureAlgorithm(*sigAlgFlag) - signer, err := jose.NewSigner(alg, signingKey) - exitOnError(err, "unable to make signer") - - obj, err := signer.Sign(readInput(*inFile)) - exitOnError(err, "unable to sign") - - var msg string - if *full { - msg = obj.FullSerialize() - } else { - msg, err = obj.CompactSerialize() - exitOnError(err, "unable to serialize message") - } - - writeOutput(*outFile, []byte(msg)) - case "verify": - verificationKey, err := jose.LoadPublicKey(keyBytes) - exitOnError(err, "unable to read private key") - - obj, err := jose.ParseSigned(string(readInput(*inFile))) - exitOnError(err, "unable to parse message") - - plaintext, err := obj.Verify(verificationKey) - exitOnError(err, "invalid signature") - - writeOutput(*outFile, plaintext) - case "expand": - input := string(readInput(*inFile)) - - var serialized string - var err error - switch *formatFlag { - case "", "JWE": - var jwe *jose.JsonWebEncryption - jwe, err = jose.ParseEncrypted(input) - if err == nil { - serialized = jwe.FullSerialize() - } - case "JWS": - var jws *jose.JsonWebSignature - jws, err = jose.ParseSigned(input) - if err == nil { - serialized = jws.FullSerialize() - } - } - - exitOnError(err, "unable to expand message") - writeOutput(*outFile, []byte(serialized)) - writeOutput(*outFile, []byte("\n")) - } -} - -// Exit and print error message if we encountered a problem -func exitOnError(err error, msg string) { - if err != nil { - fmt.Fprintf(os.Stderr, "%s: %s\n", msg, err) - os.Exit(1) - } -} - -// Read input from file or stdin -func readInput(path string) []byte { - var bytes []byte - var err error - - if path != "" { - bytes, err = ioutil.ReadFile(path) - } else { - bytes, err = ioutil.ReadAll(os.Stdin) - } - - exitOnError(err, "unable to read input") - return bytes -} - -// Write output to file or stdin -func writeOutput(path 
string, data []byte) { - var err error - - if path != "" { - err = ioutil.WriteFile(path, data, 0644) - } else { - _, err = os.Stdout.Write(data) - } - - exitOnError(err, "unable to write output") -} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/gopkg.in/square/go-jose.v1/json/README.md deleted file mode 100644 index 86de5e558..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go deleted file mode 100644 index ed89d1156..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Large data benchmark. -// The JSON data is a summary of agl's changes in the -// go, webkit, and chromium open source projects. -// We benchmark converting between the JSON form -// and in-memory data structures. 
- -package json - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "os" - "strings" - "testing" -) - -type codeResponse struct { - Tree *codeNode `json:"tree"` - Username string `json:"username"` -} - -type codeNode struct { - Name string `json:"name"` - Kids []*codeNode `json:"kids"` - CLWeight float64 `json:"cl_weight"` - Touches int `json:"touches"` - MinT int64 `json:"min_t"` - MaxT int64 `json:"max_t"` - MeanT int64 `json:"mean_t"` -} - -var codeJSON []byte -var codeStruct codeResponse - -func codeInit() { - f, err := os.Open("testdata/code.json.gz") - if err != nil { - panic(err) - } - defer f.Close() - gz, err := gzip.NewReader(f) - if err != nil { - panic(err) - } - data, err := ioutil.ReadAll(gz) - if err != nil { - panic(err) - } - - codeJSON = data - - if err := Unmarshal(codeJSON, &codeStruct); err != nil { - panic("unmarshal code.json: " + err.Error()) - } - - if data, err = Marshal(&codeStruct); err != nil { - panic("marshal code.json: " + err.Error()) - } - - if !bytes.Equal(data, codeJSON) { - println("different lengths", len(data), len(codeJSON)) - for i := 0; i < len(data) && i < len(codeJSON); i++ { - if data[i] != codeJSON[i] { - println("re-marshal: changed at byte", i) - println("orig: ", string(codeJSON[i-10:i+10])) - println("new: ", string(data[i-10:i+10])) - break - } - } - panic("re-marshal code.json: different result") - } -} - -func BenchmarkCodeEncoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - enc := NewEncoder(ioutil.Discard) - for i := 0; i < b.N; i++ { - if err := enc.Encode(&codeStruct); err != nil { - b.Fatal("Encode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeMarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - if _, err := Marshal(&codeStruct); err != nil { - b.Fatal("Marshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeDecoder(b *testing.B) 
{ - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var buf bytes.Buffer - dec := NewDecoder(&buf) - var r codeResponse - for i := 0; i < b.N; i++ { - buf.Write(codeJSON) - // hide EOF - buf.WriteByte('\n') - buf.WriteByte('\n') - buf.WriteByte('\n') - if err := dec.Decode(&r); err != nil { - b.Fatal("Decode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkDecoderStream(b *testing.B) { - b.StopTimer() - var buf bytes.Buffer - dec := NewDecoder(&buf) - buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") - var x interface{} - if err := dec.Decode(&x); err != nil { - b.Fatal("Decode:", err) - } - ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" - b.StartTimer() - for i := 0; i < b.N; i++ { - if i%300000 == 0 { - buf.WriteString(ones) - } - x = nil - if err := dec.Decode(&x); err != nil || x != 1.0 { - b.Fatalf("Decode: %v after %d", err, i) - } - } -} - -func BenchmarkCodeUnmarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - var r codeResponse - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeUnmarshalReuse(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var r codeResponse - for i := 0; i < b.N; i++ { - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } -} - -func BenchmarkUnmarshalString(b *testing.B) { - data := []byte(`"hello, world"`) - var s string - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &s); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalFloat64(b *testing.B) { - var f float64 - data := []byte(`3.14`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &f); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalInt64(b *testing.B) { - var x int64 - data := []byte(`3`) - - 
for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &x); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkIssue10335(b *testing.B) { - b.ReportAllocs() - var s struct{} - j := []byte(`{"a":{ }}`) - for n := 0; n < b.N; n++ { - if err := Unmarshal(j, &s); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/gopkg.in/square/go-jose.v1/json/decode.go deleted file mode 100644 index 37457e5a8..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/decode.go +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. 
-// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. 
-// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' 
&& '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. 
- if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. 
-func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. 
- z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. 
- _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. 
- if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case 
reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), 
int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. 
- start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. 
-func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. 
- default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go deleted file mode 100644 index 32394654e..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "bytes" - "encoding" - "fmt" - "image" - "net" - "reflect" - "strings" - "testing" - "time" -) - -type T struct { - X string - Y int - Z int `json:"-"` -} - -type U struct { - Alphabet string `json:"alpha"` -} - -type V struct { - F1 interface{} - F2 int32 - F3 Number -} - -// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and -// without UseNumber -var ifaceNumAsFloat64 = map[string]interface{}{ - "k1": float64(1), - "k2": "s", - "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, - "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, -} - -var ifaceNumAsNumber = map[string]interface{}{ - "k1": Number("1"), - "k2": "s", - "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, - "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, -} - -type tx struct { - x int -} - -// A type that can unmarshal itself. - -type unmarshaler struct { - T bool -} - -func (u *unmarshaler) UnmarshalJSON(b []byte) error { - *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. 
- return nil -} - -type ustruct struct { - M unmarshaler -} - -type unmarshalerText struct { - T bool -} - -// needed for re-marshaling tests -func (u *unmarshalerText) MarshalText() ([]byte, error) { - return []byte(""), nil -} - -func (u *unmarshalerText) UnmarshalText(b []byte) error { - *u = unmarshalerText{true} // All we need to see that UnmarshalText is called. - return nil -} - -var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) - -type ustructText struct { - M unmarshalerText -} - -var ( - um0, um1 unmarshaler // target2 of unmarshaling - ump = &um1 - umtrue = unmarshaler{true} - umslice = []unmarshaler{{true}} - umslicep = new([]unmarshaler) - umstruct = ustruct{unmarshaler{true}} - - um0T, um1T unmarshalerText // target2 of unmarshaling - umpT = &um1T - umtrueT = unmarshalerText{true} - umsliceT = []unmarshalerText{{true}} - umslicepT = new([]unmarshalerText) - umstructT = ustructText{unmarshalerText{true}} -) - -// Test data structures for anonymous fields. - -type Point struct { - Z int -} - -type Top struct { - Level0 int - Embed0 - *Embed0a - *Embed0b `json:"e,omitempty"` // treated as named - Embed0c `json:"-"` // ignored - Loop - Embed0p // has Point with X, Y, used - Embed0q // has Point with Z, used - embed // contains exported field -} - -type Embed0 struct { - Level1a int // overridden by Embed0a's Level1a with json tag - Level1b int // used because Embed0a's Level1b is renamed - Level1c int // used because Embed0a's Level1c is ignored - Level1d int // annihilated by Embed0a's Level1d - Level1e int `json:"x"` // annihilated by Embed0a.Level1e -} - -type Embed0a struct { - Level1a int `json:"Level1a,omitempty"` - Level1b int `json:"LEVEL1B,omitempty"` - Level1c int `json:"-"` - Level1d int // annihilated by Embed0's Level1d - Level1f int `json:"x"` // annihilated by Embed0's Level1e -} - -type Embed0b Embed0 - -type Embed0c Embed0 - -type Embed0p struct { - image.Point -} - -type Embed0q struct { - Point -} - -type embed struct { - Q int 
-} - -type Loop struct { - Loop1 int `json:",omitempty"` - Loop2 int `json:",omitempty"` - *Loop -} - -// From reflect test: -// The X in S6 and S7 annihilate, but they also block the X in S8.S9. -type S5 struct { - S6 - S7 - S8 -} - -type S6 struct { - X int -} - -type S7 S6 - -type S8 struct { - S9 -} - -type S9 struct { - X int - Y int -} - -// From reflect test: -// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. -type S10 struct { - S11 - S12 - S13 -} - -type S11 struct { - S6 -} - -type S12 struct { - S6 -} - -type S13 struct { - S8 -} - -type unmarshalTest struct { - in string - ptr interface{} - out interface{} - err error - useNumber bool -} - -type XYZ struct { - X interface{} - Y interface{} - Z interface{} -} - -func sliceAddr(x []int) *[]int { return &x } -func mapAddr(x map[string]int) *map[string]int { return &x } - -var unmarshalTests = []unmarshalTest{ - // basic types - {in: `true`, ptr: new(bool), out: true}, - {in: `1`, ptr: new(int), out: 1}, - {in: `1.2`, ptr: new(float64), out: 1.2}, - {in: `-5`, ptr: new(int16), out: int16(-5)}, - {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, - {in: `2`, ptr: new(Number), out: Number("2")}, - {in: `2`, ptr: new(interface{}), out: float64(2.0)}, - {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, - {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, - {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, - {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, - {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, - {in: "null", ptr: new(interface{}), out: nil}, - {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, - {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: 
Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, - - // raw values with whitespace - {in: "\n true ", ptr: new(bool), out: true}, - {in: "\t 1 ", ptr: new(int), out: 1}, - {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, - {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, - {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, - - // Z has a "-" tag. - {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, - - {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, - - // syntax errors - {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, - {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, - {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, - - // raw value errors - {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, - {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, - {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, - {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' 
after top-level value", 11}}, - - // array tests - {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, - {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, - {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, - - // empty array to interface test - {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, - {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, - {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, - {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, - - // composite tests - {in: allValueIndent, ptr: new(All), out: allValue}, - {in: allValueCompact, ptr: new(All), out: allValue}, - {in: allValueIndent, ptr: new(*All), out: &allValue}, - {in: allValueCompact, ptr: new(*All), out: &allValue}, - {in: pallValueIndent, ptr: new(All), out: pallValue}, - {in: pallValueCompact, ptr: new(All), out: pallValue}, - {in: pallValueIndent, ptr: new(*All), out: &pallValue}, - {in: pallValueCompact, ptr: new(*All), out: &pallValue}, - - // unmarshal interface test - {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called - {in: `{"T":false}`, ptr: &ump, out: &umtrue}, - {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, - {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, - {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct}, - - // UnmarshalText interface test - {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called - {in: `"X"`, ptr: &umpT, out: &umtrueT}, - {in: `["X"]`, ptr: &umsliceT, out: umsliceT}, - {in: `["X"]`, ptr: &umslicepT, out: &umsliceT}, - {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT}, - - // Overwriting of data. - // This is different from package xml, but it's what we've always done. - // Now documented and tested. 
- {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, - {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, - - { - in: `{ - "Level0": 1, - "Level1b": 2, - "Level1c": 3, - "x": 4, - "Level1a": 5, - "LEVEL1B": 6, - "e": { - "Level1a": 8, - "Level1b": 9, - "Level1c": 10, - "Level1d": 11, - "x": 12 - }, - "Loop1": 13, - "Loop2": 14, - "X": 15, - "Y": 16, - "Z": 17, - "Q": 18 - }`, - ptr: new(Top), - out: Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - }, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S5), - out: S5{S8: S8{S9: S9{Y: 2}}}, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S10), - out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, - }, - - // invalid UTF-8 is coerced to valid UTF-8. 
- { - in: "\"hello\xffworld\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\xc2\xc2world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xc2\xffworld\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800world\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", - ptr: new(string), - out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", - }, - - // issue 8305 - { - in: `{"2009-11-10T23:00:00Z": "hello world"}`, - ptr: &map[time.Time]string{}, - err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1}, - }, -} - -func TestMarshal(t *testing.T) { - b, err := Marshal(allValue) - if err != nil { - t.Fatalf("Marshal allValue: %v", err) - } - if string(b) != allValueCompact { - t.Errorf("Marshal allValueCompact") - diff(t, b, []byte(allValueCompact)) - return - } - - b, err = Marshal(pallValue) - if err != nil { - t.Fatalf("Marshal pallValue: %v", err) - } - if string(b) != pallValueCompact { - t.Errorf("Marshal pallValueCompact") - diff(t, b, []byte(pallValueCompact)) - return - } -} - -var badUTF8 = []struct { - in, out string -}{ - {"hello\xffworld", `"hello\ufffdworld"`}, - {"", `""`}, - {"\xff", `"\ufffd"`}, - {"\xff\xff", `"\ufffd\ufffd"`}, - {"a\xffb", `"a\ufffdb"`}, - {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, -} - -func TestMarshalBadUTF8(t *testing.T) { - for _, tt := range badUTF8 { - b, err := Marshal(tt.in) - if string(b) != tt.out || err != nil { - t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) - } - } -} - -func TestMarshalNumberZeroVal(t *testing.T) { - var n Number - out, err := Marshal(n) - if err != nil { - t.Fatal(err) - } - outStr := 
string(out) - if outStr != "0" { - t.Fatalf("Invalid zero val for Number: %q", outStr) - } -} - -func TestMarshalEmbeds(t *testing.T) { - top := &Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - } - b, err := Marshal(top) - if err != nil { - t.Fatal(err) - } - want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" - if string(b) != want { - t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) - } -} - -func TestUnmarshal(t *testing.T) { - for i, tt := range unmarshalTests { - var scan scanner - in := []byte(tt.in) - if err := checkValid(in, &scan); err != nil { - if !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: checkValid: %#v", i, err) - continue - } - } - if tt.ptr == nil { - continue - } - - // v = new(right-type) - v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec := NewDecoder(bytes.NewReader(in)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: %v, want %v", i, err, tt.err) - continue - } else if err != nil { - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) - data, _ := Marshal(v.Elem().Interface()) - println(string(data)) - data, _ = Marshal(tt.out) - println(string(data)) - continue - } - - // Check round trip. 
- if tt.err == nil { - enc, err := Marshal(v.Interface()) - if err != nil { - t.Errorf("#%d: error re-marshaling: %v", i, err) - continue - } - vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec = NewDecoder(bytes.NewReader(enc)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(vv.Interface()); err != nil { - t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) - t.Errorf(" In: %q", strings.Map(noSpace, string(in))) - t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) - continue - } - } - } -} - -func TestUnmarshalMarshal(t *testing.T) { - initBig() - var v interface{} - if err := Unmarshal(jsonBig, &v); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - b, err := Marshal(v) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(jsonBig, b) { - t.Errorf("Marshal jsonBig") - diff(t, b, jsonBig) - return - } -} - -var numberTests = []struct { - in string - i int64 - intErr string - f float64 - floatErr string -}{ - {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, - {in: "-12", i: -12, f: -12.0}, - {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, -} - -// Independent of Decode, basic coverage of the accessors in Number -func TestNumberAccessors(t *testing.T) { - for _, tt := range numberTests { - n := Number(tt.in) - if s := n.String(); s != tt.in { - t.Errorf("Number(%q).String() is %q", tt.in, s) - } - if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { - t.Errorf("Number(%q).Int64() is %d", tt.in, i) - } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { - t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, 
err) - } - if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { - t.Errorf("Number(%q).Float64() is %g", tt.in, f) - } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { - t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) - } - } -} - -func TestLargeByteSlice(t *testing.T) { - s0 := make([]byte, 2000) - for i := range s0 { - s0[i] = byte(i) - } - b, err := Marshal(s0) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - var s1 []byte - if err := Unmarshal(b, &s1); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !bytes.Equal(s0, s1) { - t.Errorf("Marshal large byte slice") - diff(t, s0, s1) - } -} - -type Xint struct { - X int -} - -func TestUnmarshalInterface(t *testing.T) { - var xint Xint - var i interface{} = &xint - if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestUnmarshalPtrPtr(t *testing.T) { - var xint Xint - pxint := &xint - if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestEscape(t *testing.T) { - const input = `"foobar"` + " [\u2028 \u2029]" - const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` - b, err := Marshal(input) - if err != nil { - t.Fatalf("Marshal error: %v", err) - } - if s := string(b); s != expected { - t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) - } -} - -// WrongString is a struct that's misusing the ,string modifier. 
-type WrongString struct { - Message string `json:"result,string"` -} - -type wrongStringTest struct { - in, err string -} - -var wrongStringTests = []wrongStringTest{ - {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, - {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, - {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, - {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, -} - -// If people misuse the ,string modifier, the error message should be -// helpful, telling the user that they're doing it wrong. -func TestErrorMessageFromMisusedString(t *testing.T) { - for n, tt := range wrongStringTests { - r := strings.NewReader(tt.in) - var s WrongString - err := NewDecoder(r).Decode(&s) - got := fmt.Sprintf("%v", err) - if got != tt.err { - t.Errorf("%d. got err = %q, want %q", n, got, tt.err) - } - } -} - -func noSpace(c rune) rune { - if isSpace(byte(c)) { //only used for ascii - return -1 - } - return c -} - -type All struct { - Bool bool - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - Uintptr uintptr - Float32 float32 - Float64 float64 - - Foo string `json:"bar"` - Foo2 string `json:"bar2,dummyopt"` - - IntStr int64 `json:",string"` - - PBool *bool - PInt *int - PInt8 *int8 - PInt16 *int16 - PInt32 *int32 - PInt64 *int64 - PUint *uint - PUint8 *uint8 - PUint16 *uint16 - PUint32 *uint32 - PUint64 *uint64 - PUintptr *uintptr - PFloat32 *float32 - PFloat64 *float64 - - String string - PString *string - - Map map[string]Small - MapP map[string]*Small - PMap *map[string]Small - PMapP *map[string]*Small - - EmptyMap map[string]Small - NilMap map[string]Small - - Slice []Small - SliceP []*Small - PSlice *[]Small - PSliceP *[]*Small - - EmptySlice []Small - NilSlice 
[]Small - - StringSlice []string - ByteSlice []byte - - Small Small - PSmall *Small - PPSmall **Small - - Interface interface{} - PInterface *interface{} - - unexported int -} - -type Small struct { - Tag string -} - -var allValue = All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Uintptr: 12, - Float32: 14.1, - Float64: 15.1, - Foo: "foo", - Foo2: "foo2", - IntStr: 42, - String: "16", - Map: map[string]Small{ - "17": {Tag: "tag17"}, - "18": {Tag: "tag18"}, - }, - MapP: map[string]*Small{ - "19": {Tag: "tag19"}, - "20": nil, - }, - EmptyMap: map[string]Small{}, - Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, - SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, - EmptySlice: []Small{}, - StringSlice: []string{"str24", "str25", "str26"}, - ByteSlice: []byte{27, 28, 29}, - Small: Small{Tag: "tag30"}, - PSmall: &Small{Tag: "tag31"}, - Interface: 5.2, -} - -var pallValue = All{ - PBool: &allValue.Bool, - PInt: &allValue.Int, - PInt8: &allValue.Int8, - PInt16: &allValue.Int16, - PInt32: &allValue.Int32, - PInt64: &allValue.Int64, - PUint: &allValue.Uint, - PUint8: &allValue.Uint8, - PUint16: &allValue.Uint16, - PUint32: &allValue.Uint32, - PUint64: &allValue.Uint64, - PUintptr: &allValue.Uintptr, - PFloat32: &allValue.Float32, - PFloat64: &allValue.Float64, - PString: &allValue.String, - PMap: &allValue.Map, - PMapP: &allValue.MapP, - PSlice: &allValue.Slice, - PSliceP: &allValue.SliceP, - PPSmall: &allValue.PSmall, - PInterface: &allValue.Interface, -} - -var allValueIndent = `{ - "Bool": true, - "Int": 2, - "Int8": 3, - "Int16": 4, - "Int32": 5, - "Int64": 6, - "Uint": 7, - "Uint8": 8, - "Uint16": 9, - "Uint32": 10, - "Uint64": 11, - "Uintptr": 12, - "Float32": 14.1, - "Float64": 15.1, - "bar": "foo", - "bar2": "foo2", - "IntStr": "42", - "PBool": null, - "PInt": null, - "PInt8": null, - "PInt16": null, - "PInt32": null, - "PInt64": null, - "PUint": null, - "PUint8": 
null, - "PUint16": null, - "PUint32": null, - "PUint64": null, - "PUintptr": null, - "PFloat32": null, - "PFloat64": null, - "String": "16", - "PString": null, - "Map": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "MapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "PMap": null, - "PMapP": null, - "EmptyMap": {}, - "NilMap": null, - "Slice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "SliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "PSlice": null, - "PSliceP": null, - "EmptySlice": [], - "NilSlice": null, - "StringSlice": [ - "str24", - "str25", - "str26" - ], - "ByteSlice": "Gxwd", - "Small": { - "Tag": "tag30" - }, - "PSmall": { - "Tag": "tag31" - }, - "PPSmall": null, - "Interface": 5.2, - "PInterface": null -}` - -var allValueCompact = strings.Map(noSpace, allValueIndent) - -var pallValueIndent = `{ - "Bool": false, - "Int": 0, - "Int8": 0, - "Int16": 0, - "Int32": 0, - "Int64": 0, - "Uint": 0, - "Uint8": 0, - "Uint16": 0, - "Uint32": 0, - "Uint64": 0, - "Uintptr": 0, - "Float32": 0, - "Float64": 0, - "bar": "", - "bar2": "", - "IntStr": "0", - "PBool": true, - "PInt": 2, - "PInt8": 3, - "PInt16": 4, - "PInt32": 5, - "PInt64": 6, - "PUint": 7, - "PUint8": 8, - "PUint16": 9, - "PUint32": 10, - "PUint64": 11, - "PUintptr": 12, - "PFloat32": 14.1, - "PFloat64": 15.1, - "String": "", - "PString": "16", - "Map": null, - "MapP": null, - "PMap": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "PMapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "EmptyMap": null, - "NilMap": null, - "Slice": null, - "SliceP": null, - "PSlice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "PSliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "EmptySlice": null, - "NilSlice": null, - "StringSlice": null, - "ByteSlice": null, - "Small": { - "Tag": "" - }, - "PSmall": null, - "PPSmall": { - "Tag": "tag31" - }, - "Interface": null, - 
"PInterface": 5.2 -}` - -var pallValueCompact = strings.Map(noSpace, pallValueIndent) - -func TestRefUnmarshal(t *testing.T) { - type S struct { - // Ref is defined in encode_test.go. - R0 Ref - R1 *Ref - R2 RefText - R3 *RefText - } - want := S{ - R0: 12, - R1: new(Ref), - R2: 13, - R3: new(RefText), - } - *want.R1 = 12 - *want.R3 = 13 - - var got S - if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %+v, want %+v", got, want) - } -} - -// Test that the empty string doesn't panic decoding when ,string is specified -// Issue 3450 -func TestEmptyString(t *testing.T) { - type T2 struct { - Number1 int `json:",string"` - Number2 int `json:",string"` - } - data := `{"Number1":"1", "Number2":""}` - dec := NewDecoder(strings.NewReader(data)) - var t2 T2 - err := dec.Decode(&t2) - if err == nil { - t.Fatal("Decode: did not return error") - } - if t2.Number1 != 1 { - t.Fatal("Decode: did not set Number1") - } -} - -// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). -// It should also not be an error (issue 2540, issue 8587). 
-func TestNullString(t *testing.T) { - type T struct { - A int `json:",string"` - B int `json:",string"` - C *int `json:",string"` - } - data := []byte(`{"A": "1", "B": null, "C": null}`) - var s T - s.B = 1 - s.C = new(int) - *s.C = 2 - err := Unmarshal(data, &s) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if s.B != 1 || s.C != nil { - t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) - } -} - -func intp(x int) *int { - p := new(int) - *p = x - return p -} - -func intpp(x *int) **int { - pp := new(*int) - *pp = x - return pp -} - -var interfaceSetTests = []struct { - pre interface{} - json string - post interface{} -}{ - {"foo", `"bar"`, "bar"}, - {"foo", `2`, 2.0}, - {"foo", `true`, true}, - {"foo", `null`, nil}, - - {nil, `null`, nil}, - {new(int), `null`, nil}, - {(*int)(nil), `null`, nil}, - {new(*int), `null`, new(*int)}, - {(**int)(nil), `null`, nil}, - {intp(1), `null`, nil}, - {intpp(nil), `null`, intpp(nil)}, - {intpp(intp(1)), `null`, intpp(nil)}, -} - -func TestInterfaceSet(t *testing.T) { - for _, tt := range interfaceSetTests { - b := struct{ X interface{} }{tt.pre} - blob := `{"X":` + tt.json + `}` - if err := Unmarshal([]byte(blob), &b); err != nil { - t.Errorf("Unmarshal %#q: %v", blob, err) - continue - } - if !reflect.DeepEqual(b.X, tt.post) { - t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) - } - } -} - -// JSON null values should be ignored for primitives and string values instead of resulting in an error. 
-// Issue 2540 -func TestUnmarshalNulls(t *testing.T) { - jsonData := []byte(`{ - "Bool" : null, - "Int" : null, - "Int8" : null, - "Int16" : null, - "Int32" : null, - "Int64" : null, - "Uint" : null, - "Uint8" : null, - "Uint16" : null, - "Uint32" : null, - "Uint64" : null, - "Float32" : null, - "Float64" : null, - "String" : null}`) - - nulls := All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Float32: 12.1, - Float64: 13.1, - String: "14"} - - err := Unmarshal(jsonData, &nulls) - if err != nil { - t.Errorf("Unmarshal of null values failed: %v", err) - } - if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || - nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || - nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { - - t.Errorf("Unmarshal of null values affected primitives") - } -} - -func TestStringKind(t *testing.T) { - type stringKind string - - var m1, m2 map[stringKind]int - m1 = map[stringKind]int{ - "foo": 42, - } - - data, err := Marshal(m1) - if err != nil { - t.Errorf("Unexpected error marshaling: %v", err) - } - - err = Unmarshal(data, &m2) - if err != nil { - t.Errorf("Unexpected error unmarshaling: %v", err) - } - - if !reflect.DeepEqual(m1, m2) { - t.Error("Items should be equal after encoding and then decoding") - } -} - -// Custom types with []byte as underlying type could not be marshalled -// and then unmarshalled. -// Issue 8962. -func TestByteKind(t *testing.T) { - type byteKind []byte - - a := byteKind("hello") - - data, err := Marshal(a) - if err != nil { - t.Error(err) - } - var b byteKind - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Errorf("expected %v == %v", a, b) - } -} - -// The fix for issue 8962 introduced a regression. -// Issue 12921. 
-func TestSliceOfCustomByte(t *testing.T) { - type Uint8 uint8 - - a := []Uint8("hello") - - data, err := Marshal(a) - if err != nil { - t.Fatal(err) - } - var b []Uint8 - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Fatalf("expected %v == %v", a, b) - } -} - -var decodeTypeErrorTests = []struct { - dest interface{} - src string -}{ - {new(string), `{"user": "name"}`}, // issue 4628. - {new(error), `{}`}, // issue 4222 - {new(error), `[]`}, - {new(error), `""`}, - {new(error), `123`}, - {new(error), `true`}, -} - -func TestUnmarshalTypeError(t *testing.T) { - for _, item := range decodeTypeErrorTests { - err := Unmarshal([]byte(item.src), item.dest) - if _, ok := err.(*UnmarshalTypeError); !ok { - t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", - item.src, item.dest, err) - } - } -} - -var unmarshalSyntaxTests = []string{ - "tru", - "fals", - "nul", - "123e", - `"hello`, - `[1,2,3`, - `{"key":1`, - `{"key":1,`, -} - -func TestUnmarshalSyntax(t *testing.T) { - var x interface{} - for _, src := range unmarshalSyntaxTests { - err := Unmarshal([]byte(src), &x) - if _, ok := err.(*SyntaxError); !ok { - t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) - } - } -} - -// Test handling of unexported fields that should be ignored. -// Issue 4660 -type unexportedFields struct { - Name string - m map[string]interface{} `json:"-"` - m2 map[string]interface{} `json:"abcd"` -} - -func TestUnmarshalUnexported(t *testing.T) { - input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` - want := &unexportedFields{Name: "Bob"} - - out := &unexportedFields{} - err := Unmarshal([]byte(input), out) - if err != nil { - t.Errorf("got error %v, expected nil", err) - } - if !reflect.DeepEqual(out, want) { - t.Errorf("got %q, want %q", out, want) - } -} - -// Time3339 is a time.Time which encodes to and from JSON -// as an RFC 3339 time in UTC. 
-type Time3339 time.Time - -func (t *Time3339) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) - } - tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) - if err != nil { - return err - } - *t = Time3339(tm) - return nil -} - -func TestUnmarshalJSONLiteralError(t *testing.T) { - var t3 Time3339 - err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) - if err == nil { - t.Fatalf("expected error; got time %v", time.Time(t3)) - } - if !strings.Contains(err.Error(), "range") { - t.Errorf("got err = %v; want out of range error", err) - } -} - -// Test that extra object elements in an array do not result in a -// "data changing underfoot" error. -// Issue 3717 -func TestSkipArrayObjects(t *testing.T) { - json := `[{}]` - var dest [0]interface{} - - err := Unmarshal([]byte(json), &dest) - if err != nil { - t.Errorf("got error %q, want nil", err) - } -} - -// Test semantics of pre-filled struct fields and pre-filled map fields. -// Issue 4900. -func TestPrefilled(t *testing.T) { - ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } - - // Values here change, cannot reuse table across runs. 
- var prefillTests = []struct { - in string - ptr interface{} - out interface{} - }{ - { - in: `{"X": 1, "Y": 2}`, - ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, - out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, - }, - { - in: `{"X": 1, "Y": 2}`, - ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), - out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), - }, - } - - for _, tt := range prefillTests { - ptrstr := fmt.Sprintf("%v", tt.ptr) - err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here - if err != nil { - t.Errorf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(tt.ptr, tt.out) { - t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) - } - } -} - -var invalidUnmarshalTests = []struct { - v interface{} - want string -}{ - {nil, "json: Unmarshal(nil)"}, - {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, - {(*int)(nil), "json: Unmarshal(nil *int)"}, -} - -func TestInvalidUnmarshal(t *testing.T) { - buf := []byte(`{"a":"1"}`) - for _, tt := range invalidUnmarshalTests { - err := Unmarshal(buf, tt.v) - if err == nil { - t.Errorf("Unmarshal expecting error, got nil") - continue - } - if got := err.Error(); got != tt.want { - t.Errorf("Unmarshal = %q; want %q", got, tt.want) - } - } -} - -var invalidUnmarshalTextTests = []struct { - v interface{} - want string -}{ - {nil, "json: Unmarshal(nil)"}, - {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, - {(*int)(nil), "json: Unmarshal(nil *int)"}, - {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, -} - -func TestInvalidUnmarshalText(t *testing.T) { - buf := []byte(`123`) - for _, tt := range invalidUnmarshalTextTests { - err := Unmarshal(buf, tt.v) - if err == nil { - t.Errorf("Unmarshal expecting error, got nil") - continue - } - if got := err.Error(); got != tt.want { - t.Errorf("Unmarshal = %q; want %q", got, tt.want) - } - } -} - -// Test that string option is ignored for invalid 
types. -// Issue 9812. -func TestInvalidStringOption(t *testing.T) { - num := 0 - item := struct { - T time.Time `json:",string"` - M map[string]string `json:",string"` - S []string `json:",string"` - A [1]string `json:",string"` - I interface{} `json:",string"` - P *int `json:",string"` - }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} - - data, err := Marshal(item) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - err = Unmarshal(data, &item) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } -} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/gopkg.in/square/go-jose.v1/json/encode.go deleted file mode 100644 index 1dae8bb7c..000000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. 
-// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. 
This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. 
-// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML